diff --git a/.github/workflows/pd-tests.yaml b/.github/workflows/pd-tests.yaml
index 73e31fd4ad1..262f38469d8 100644
--- a/.github/workflows/pd-tests.yaml
+++ b/.github/workflows/pd-tests.yaml
@@ -55,24 +55,24 @@ jobs:
         with:
           name: cover-reports
           path: covprofile_${{ matrix.worker_id }}
-  report-coverage:
-    needs: chunks
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Download chunk report
-        uses: actions/download-artifact@v2
-        with:
-          name: cover-reports
-      - name: Merge
-        env:
-          TOTAL_JOBS: ${{needs.chunks.outputs.job-total}}
-        run: for i in $(seq 1 $TOTAL_JOBS); do cat covprofile_$i >> covprofile; done
-      - name: Send coverage
-        uses: codecov/codecov-action@v1
-        with:
-          token: ${{ secrets.CODECOV }}
-          file: ./covprofile
-          flags: unittests
-          name: codecov-umbrella
+  # report-coverage:
+  #   needs: chunks
+  #   runs-on: ubuntu-latest
+  #   steps:
+  #     - name: Checkout code
+  #       uses: actions/checkout@v3
+  #     - name: Download chunk report
+  #       uses: actions/download-artifact@v2
+  #       with:
+  #         name: cover-reports
+  #     - name: Merge
+  #       env:
+  #         TOTAL_JOBS: ${{needs.chunks.outputs.job-total}}
+  #       run: for i in $(seq 1 $TOTAL_JOBS); do cat covprofile_$i >> covprofile; done
+  #     - name: Send coverage
+  #       uses: codecov/codecov-action@v1
+  #       with:
+  #         token: ${{ secrets.CODECOV }}
+  #         file: ./covprofile
+  #         flags: unittests
+  #         name: codecov-umbrella
diff --git a/pkg/utils/testutil/api_check.go b/pkg/utils/testutil/api_check.go
index 4ce5e859f3f..ea91654b149 100644
--- a/pkg/utils/testutil/api_check.go
+++ b/pkg/utils/testutil/api_check.go
@@ -114,6 +114,21 @@ func CheckGetJSON(client *http.Client, url string, data []byte, checkOpts ...fun
     return checkResp(resp, checkOpts...)
 }
 
+// CheckGetUntilStatusCode retries a GET request until the response status code matches the expected one.
+func CheckGetUntilStatusCode(re *require.Assertions, client *http.Client, url string, code int) error {
+    var err error
+    Eventually(re, func() bool {
+        resp, err2 := apiutil.GetJSON(client, url, nil)
+        if err2 != nil {
+            err = err2
+            return true
+        }
+        defer resp.Body.Close()
+        return resp.StatusCode == code
+    })
+    return err
+}
+
 // CheckPatchJSON is used to do patch request and do check options.
 func CheckPatchJSON(client *http.Client, url string, data []byte, checkOpts ...func([]byte, int, http.Header)) error {
     resp, err := apiutil.PatchJSON(client, url, data)
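Note: `CheckGetUntilStatusCode` builds on the package's `Eventually` helper, so callers get polling for free. A minimal usage sketch, assuming a test in a package that already has `tu`, `testDialClient`, and `net/http` available; the URL below is illustrative, not part of this change:

    // Poll the endpoint until it answers 200, tolerating transient
    // failures while leadership or config sync settles.
    re := suite.Require()
    url := urlPrefix + "/config" // hypothetical endpoint
    suite.NoError(tu.CheckGetUntilStatusCode(re, testDialClient, url, http.StatusOK))
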
diff --git a/server/api/admin_test.go b/server/api/admin_test.go
index 09130fd8385..f9b866af952 100644
--- a/server/api/admin_test.go
+++ b/server/api/admin_test.go
@@ -18,6 +18,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "io"
     "net/http"
     "testing"
     "time"
@@ -27,6 +28,7 @@ import (
     "github.com/stretchr/testify/suite"
     "github.com/tikv/pd/pkg/core"
     "github.com/tikv/pd/pkg/replication"
+    "github.com/tikv/pd/pkg/utils/apiutil"
     tu "github.com/tikv/pd/pkg/utils/testutil"
     "github.com/tikv/pd/server"
 )
@@ -188,9 +190,24 @@ func (suite *adminTestSuite) TestResetTS() {
     values, err := json.Marshal(args)
     suite.NoError(err)
     re := suite.Require()
-    err = tu.CheckPostJSON(testDialClient, url, values,
-        tu.StatusOK(re),
-        tu.StringEqual(re, "\"Reset ts successfully.\"\n"))
+    tu.Eventually(re, func() bool {
+        resp, err := apiutil.PostJSON(testDialClient, url, values)
+        re.NoError(err)
+        defer resp.Body.Close()
+        b, err := io.ReadAll(resp.Body)
+        re.NoError(err)
+        switch resp.StatusCode {
+        case http.StatusOK:
+            re.Contains(string(b), "Reset ts successfully.")
+            return true
+        case http.StatusForbidden:
+            re.Contains(string(b), "[PD:etcd:ErrEtcdTxnConflict]etcd transaction failed, conflicted and rolled back")
+            return false
+        default:
+            re.FailNowf("unexpected status code", "%d", resp.StatusCode)
+            return false
+        }
+    })
     suite.NoError(err)
     t2 := makeTS(32 * time.Hour)
     args["tso"] = fmt.Sprintf("%d", t2)
diff --git a/server/api/diagnostic_test.go b/server/api/diagnostic_test.go
index 1774c221539..4e08426ea43 100644
--- a/server/api/diagnostic_test.go
+++ b/server/api/diagnostic_test.go
@@ -17,6 +17,7 @@ package api
 import (
     "encoding/json"
     "fmt"
+    "net/http"
     "testing"
     "time"
 
@@ -63,6 +64,8 @@ func (suite *diagnosticTestSuite) TearDownSuite() {
 
 func (suite *diagnosticTestSuite) checkStatus(status string, url string) {
     re := suite.Require()
+    err := tu.CheckGetUntilStatusCode(re, testDialClient, url, http.StatusOK)
+    suite.NoError(err)
     suite.Eventually(func() bool {
         result := &schedulers.DiagnosticResult{}
         err := tu.ReadGetJSON(re, testDialClient, url, result)
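Note: the `TestResetTS` change above tolerates a transient 403 carrying `ErrEtcdTxnConflict` and retries instead of failing. The same idea can be phrased as a reusable helper; this is a sketch under the package's existing imports (`apiutil`, `tu`, `net/http`), and `postUntilOK` is a hypothetical name, not part of the diff:

    // postUntilOK retries a POST until the server stops answering with a
    // retryable status; anything other than 200/403 is a real failure.
    func postUntilOK(re *require.Assertions, client *http.Client, url string, body []byte) {
        tu.Eventually(re, func() bool {
            resp, err := apiutil.PostJSON(client, url, body)
            re.NoError(err)
            defer resp.Body.Close()
            if resp.StatusCode == http.StatusForbidden {
                return false // e.g. an etcd txn conflict; try again
            }
            return resp.StatusCode == http.StatusOK
        })
    }
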
diff --git a/tests/integrations/mcs/scheduling/config_test.go b/tests/integrations/mcs/scheduling/config_test.go
index 42ba051eb84..06d73caf130 100644
--- a/tests/integrations/mcs/scheduling/config_test.go
+++ b/tests/integrations/mcs/scheduling/config_test.go
@@ -93,6 +93,9 @@ func (suite *configTestSuite) TestConfigWatch() {
     re.Equal(sc.DefaultSplitMergeInterval, watcher.GetScheduleConfig().SplitMergeInterval.Duration)
     re.Equal("0.0.0", watcher.GetClusterVersion().String())
     // Update the config and check if the scheduling config watcher can get the latest value.
+    testutil.Eventually(re, func() bool {
+        return watcher.GetReplicationConfig().MaxReplicas == 3
+    })
     persistOpts := suite.pdLeaderServer.GetPersistOptions()
     persistOpts.SetMaxReplicas(5)
     persistConfig(re, suite.pdLeaderServer)
diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go
index 2cc8427911a..315ec3cf7c7 100644
--- a/tests/pdctl/config/config_test.go
+++ b/tests/pdctl/config/config_test.go
@@ -315,12 +315,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) {
     re.Contains(string(output), "Success!")
 
     // test show
-    var rules []placement.Rule
-    output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "show")
-    re.NoError(err)
-    re.NoError(json.Unmarshal(output, &rules))
-    re.Len(rules, 1)
-    re.Equal([2]string{"pd", "default"}, rules[0].Key())
+    suite.checkShowRuleKey(pdAddr, [][2]string{{"pd", "default"}})
 
     f, _ := os.CreateTemp("/tmp", "pd_tests")
     fname := f.Name()
@@ -328,12 +323,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) {
     defer os.RemoveAll(fname)
 
     // test load
-    _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "load", "--out="+fname)
-    re.NoError(err)
-    b, _ := os.ReadFile(fname)
-    re.NoError(json.Unmarshal(b, &rules))
-    re.Len(rules, 1)
-    re.Equal([2]string{"pd", "default"}, rules[0].Key())
+    rules := suite.checkLoadRule(pdAddr, fname, [][2]string{{"pd", "default"}})
 
     // test save
     rules = append(rules, placement.Rule{
@@ -347,42 +337,26 @@ func (suite *configTestSuite) checkPlacementRules(cluster *tests.TestCluster) {
         Role:  "voter",
         Count: 2,
     })
-    b, _ = json.Marshal(rules)
+    b, _ := json.Marshal(rules)
     os.WriteFile(fname, b, 0600)
     _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "save", "--in="+fname)
     re.NoError(err)
 
     // test show group
-    var rules2 []placement.Rule
-    output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "show", "--group=pd")
-    re.NoError(err)
-    re.NoError(json.Unmarshal(output, &rules2))
-    re.Len(rules2, 2)
-    re.Equal([2]string{"pd", "default"}, rules2[0].Key())
-    re.Equal([2]string{"pd", "test1"}, rules2[1].Key())
+    suite.checkShowRuleKey(pdAddr, [][2]string{{"pd", "default"}, {"pd", "test1"}}, "--group=pd")
 
     // test rule region detail
     tests.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"))
-    fit := &placement.RegionFit{}
-    // need clear up args, so create new a cobra.Command. Otherwise gourp still exists.
-    cmd2 := pdctlCmd.GetRootCmd()
-    output, err = pdctl.ExecuteCommand(cmd2, "-u", pdAddr, "config", "placement-rules", "show", "--region=1", "--detail")
-    re.NoError(err)
-    re.NoError(json.Unmarshal(output, fit))
-    re.Len(fit.RuleFits, 3)
-    re.Equal([2]string{"pd", "default"}, fit.RuleFits[0].Rule.Key())
+    suite.checkShowRuleKey(pdAddr, [][2]string{{"pd", "default"}}, "--region=1", "--detail")
 
     // test delete
+    // need to clear up args, so create a new cobra.Command. Otherwise the group flag still exists.
     rules[0].Count = 0
     b, _ = json.Marshal(rules)
     os.WriteFile(fname, b, 0600)
     _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "save", "--in="+fname)
     re.NoError(err)
-    output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "show", "--group=pd")
-    re.NoError(err)
-    re.NoError(json.Unmarshal(output, &rules))
-    re.Len(rules, 1)
-    re.Equal([2]string{"pd", "test1"}, rules[0].Key())
+    suite.checkShowRuleKey(pdAddr, [][2]string{{"pd", "test1"}}, "--group=pd")
 }
 
 func (suite *configTestSuite) TestPlacementRuleGroups() {
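Note on the "create a new cobra.Command" comment above: cobra keeps flag values on a command tree across `Execute` calls, so reusing the same root command leaks `--group` (or `--region`) into the next invocation. A standalone sketch of the effect, with invented command names:

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func newShowCmd() *cobra.Command {
        cmd := &cobra.Command{
            Use: "show",
            Run: func(c *cobra.Command, _ []string) {
                group, _ := c.Flags().GetString("group")
                fmt.Printf("group=%q\n", group)
            },
        }
        cmd.Flags().String("group", "", "filter by group")
        return cmd
    }

    func main() {
        cmd := newShowCmd()
        cmd.SetArgs([]string{"--group=pd"})
        _ = cmd.Execute() // group="pd"

        cmd.SetArgs([]string{})
        _ = cmd.Execute() // still group="pd": the old flag value sticks

        fresh := newShowCmd()
        fresh.SetArgs([]string{})
        _ = fresh.Execute() // group="": a fresh command starts clean
    }
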
@@ -431,14 +405,16 @@ func (suite *configTestSuite) checkPlacementRuleGroups(cluster *tests.TestCluste
 
     // show all
     var groups []placement.RuleGroup
-    output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show")
-    re.NoError(err)
-    re.NoError(json.Unmarshal(output, &groups))
-    re.Equal([]placement.RuleGroup{
-        {ID: "pd", Index: 42, Override: true},
-        {ID: "group2", Index: 100, Override: false},
-        {ID: "group3", Index: 200, Override: false},
-    }, groups)
+    testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server
+        output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show")
+        re.NoError(err)
+        re.NoError(json.Unmarshal(output, &groups))
+        return reflect.DeepEqual([]placement.RuleGroup{
+            {ID: "pd", Index: 42, Override: true},
+            {ID: "group2", Index: 100, Override: false},
+            {ID: "group3", Index: 200, Override: false},
+        }, groups)
+    })
 
     // delete
     output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "delete", "group2")
@@ -446,17 +422,21 @@ func (suite *configTestSuite) checkPlacementRuleGroups(cluster *tests.TestCluste
     re.Contains(string(output), "Delete group and rules successfully.")
 
     // show again
-    output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show", "group2")
-    re.NoError(err)
-    re.Contains(string(output), "404")
+    testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server
+        output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show", "group2")
+        re.NoError(err)
+        return strings.Contains(string(output), "404")
+    })
 
     // delete using regex
     _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "delete", "--regexp", ".*3")
     re.NoError(err)
-    _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show", "group3")
-    re.NoError(err)
-    re.Contains(string(output), "404")
+    testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server
+        output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-group", "show", "group3")
+        re.NoError(err)
+        return strings.Contains(string(output), "404")
+    })
 }
 
 func (suite *configTestSuite) TestPlacementRuleBundle() {
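Note: inside an `Eventually` closure the checks switch from `re.Equal` to `reflect.DeepEqual`, because a failed assertion aborts the test immediately while a `false` return just schedules another poll. The pattern, as a hypothetical helper with invented names:

    // eventuallyDeepEqual polls until the fetched value deeply equals want,
    // instead of failing fast on the first stale read.
    func eventuallyDeepEqual(re *require.Assertions, want interface{}, get func() interface{}) {
        testutil.Eventually(re, func() bool {
            return reflect.DeepEqual(want, get())
        })
    }
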
@@ -496,28 +476,19 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste
     defer os.RemoveAll(fname)
 
     // test load
-    var bundles []placement.GroupBundle
-    _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname)
-    re.NoError(err)
-    b, _ := os.ReadFile(fname)
-    re.NoError(json.Unmarshal(b, &bundles))
-    re.Len(bundles, 1)
-    re.Equal(placement.GroupBundle{ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}}, bundles[0])
+    suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{
+        {ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}},
+    })
 
     // test set
     bundle.ID = "pe"
     bundle.Rules[0].GroupID = "pe"
-    b, err = json.Marshal(bundle)
+    b, err := json.Marshal(bundle)
     re.NoError(err)
     re.NoError(os.WriteFile(fname, b, 0600))
     _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname)
     re.NoError(err)
-
-    _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname)
-    re.NoError(err)
-    b, _ = os.ReadFile(fname)
-    re.NoError(json.Unmarshal(b, &bundles))
-    assertBundles(re, bundles, []placement.GroupBundle{
+    suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{
         {ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: "default", Role: "voter", Count: 3}}},
         {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}},
     })
@@ -526,11 +497,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste
     _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", "pd")
     re.NoError(err)
 
-    _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname)
-    re.NoError(err)
-    b, _ = os.ReadFile(fname)
-    re.NoError(json.Unmarshal(b, &bundles))
-    assertBundles(re, bundles, []placement.GroupBundle{
+    suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{
         {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}},
     })
 
@@ -542,17 +509,18 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste
     re.NoError(os.WriteFile(fname, b, 0600))
     _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname)
     re.NoError(err)
+    suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{
+        {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}},
+        {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}}},
+    })
 
     _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", "--regexp", ".*f")
     re.NoError(err)
-    _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname)
-    re.NoError(err)
-    b, _ = os.ReadFile(fname)
-    re.NoError(json.Unmarshal(b, &bundles))
-    assertBundles(re, bundles, []placement.GroupBundle{
+    bundles := []placement.GroupBundle{
         {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}},
-    })
+    }
+    suite.checkLoadRuleBundle(pdAddr, fname, bundles)
 
     // test save
     bundle.Rules = []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}}
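Note: the bundle tests round-trip JSON through a temp file because `rule-bundle set/save` read from `--in=` and `load` writes to `--out=`. A minimal sketch of the file side of that round trip, assuming the placement types used above; the group ID is illustrative:

    // Marshal a bundle to the file that pdctl will consume via --in=<fname>.
    bundle := placement.GroupBundle{
        ID:    "pe", // illustrative group ID
        Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}},
    }
    b, err := json.Marshal(bundle)
    re.NoError(err)
    re.NoError(os.WriteFile(fname, b, 0600))
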
@@ -562,13 +530,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste
     re.NoError(os.WriteFile(fname, b, 0600))
     _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname)
     re.NoError(err)
-
-    _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname)
-    re.NoError(err)
-    b, err = os.ReadFile(fname)
-    re.NoError(err)
-    re.NoError(json.Unmarshal(b, &bundles))
-    assertBundles(re, bundles, []placement.GroupBundle{
+    suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{
         {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: "default", Role: "voter", Count: 3}}},
         {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}}},
     })
@@ -581,16 +543,67 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *tests.TestCluste
     _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname, "--partial")
     re.NoError(err)
 
-    _, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname)
-    re.NoError(err)
-    b, err = os.ReadFile(fname)
-    re.NoError(err)
-    re.NoError(json.Unmarshal(b, &bundles))
-    assertBundles(re, bundles, []placement.GroupBundle{
+    suite.checkLoadRuleBundle(pdAddr, fname, []placement.GroupBundle{
         {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: "default", Role: "voter", Count: 3}}},
     })
 }
 
+func (suite *configTestSuite) checkLoadRuleBundle(pdAddr string, fname string, expectValues []placement.GroupBundle) {
+    var bundles []placement.GroupBundle
+    cmd := pdctlCmd.GetRootCmd()
+    testutil.Eventually(suite.Require(), func() bool { // wait for the config to be synced to the scheduling server
+        _, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "load", "--out="+fname)
+        suite.NoError(err)
+        b, _ := os.ReadFile(fname)
+        suite.NoError(json.Unmarshal(b, &bundles))
+        return len(bundles) == len(expectValues)
+    })
+    assertBundles(suite.Require(), bundles, expectValues)
+}
+
+func (suite *configTestSuite) checkLoadRule(pdAddr string, fname string, expectValues [][2]string) []placement.Rule {
+    var rules []placement.Rule
+    cmd := pdctlCmd.GetRootCmd()
+    testutil.Eventually(suite.Require(), func() bool { // wait for the config to be synced to the scheduling server
+        _, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "load", "--out="+fname)
+        suite.NoError(err)
+        b, _ := os.ReadFile(fname)
+        suite.NoError(json.Unmarshal(b, &rules))
+        return len(rules) == len(expectValues)
+    })
+    for i, v := range expectValues {
+        suite.Equal(v, rules[i].Key())
+    }
+    return rules
+}
+
+func (suite *configTestSuite) checkShowRuleKey(pdAddr string, expectValues [][2]string, opts ...string) {
+    var rules []placement.Rule
+    var fit placement.RegionFit
+    cmd := pdctlCmd.GetRootCmd()
+    testutil.Eventually(suite.Require(), func() bool { // wait for the config to be synced to the scheduling server
+        args := []string{"-u", pdAddr, "config", "placement-rules", "show"}
+        output, err := pdctl.ExecuteCommand(cmd, append(args, opts...)...)
+        suite.NoError(err)
+        err = json.Unmarshal(output, &rules)
+        if err == nil {
+            return len(rules) == len(expectValues)
+        }
+        suite.NoError(json.Unmarshal(output, &fit))
+        return len(fit.RuleFits) != 0
+    })
+    if len(rules) != 0 {
+        for i, v := range expectValues {
+            suite.Equal(v, rules[i].Key())
+        }
+    }
+    if len(fit.RuleFits) != 0 {
+        for i, v := range expectValues {
+            suite.Equal(v, fit.RuleFits[i].Rule.Key())
+        }
+    }
+}
+
 func TestReplicationMode(t *testing.T) {
     re := require.New(t)
     ctx, cancel := context.WithCancel(context.Background())
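Note: `checkShowRuleKey` has to cope with two response shapes, since `show --group=...` returns a rule list while `show --region=... --detail` returns a region fit. A standalone sketch of that decode-then-fall-back approach, with invented payloads and field names:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        payloads := [][]byte{
            []byte(`[{"group_id":"pd","id":"default"}]`),         // list shape
            []byte(`{"rule-fits":[{"rule":{"group_id":"pd"}}]}`), // fit shape
        }
        for _, p := range payloads {
            var rules []map[string]interface{}
            if err := json.Unmarshal(p, &rules); err == nil {
                fmt.Println("rule list:", len(rules), "entries")
                continue
            }
            // Not a JSON array, so try the region-fit object instead.
            var fit struct {
                RuleFits []json.RawMessage `json:"rule-fits"`
            }
            if err := json.Unmarshal(p, &fit); err == nil {
                fmt.Println("region fit:", len(fit.RuleFits), "rule fits")
            }
        }
    }
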
diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go
index 3ee3357e031..6d292021767 100644
--- a/tests/server/api/rule_test.go
+++ b/tests/server/api/rule_test.go
@@ -210,8 +210,10 @@ func (suite *ruleTestSuite) checkGet(cluster *tests.TestCluster) {
     var resp placement.Rule
     url := fmt.Sprintf("%s/rule/%s/%s", urlPrefix, testCase.rule.GroupID, testCase.rule.ID)
     if testCase.found {
-        err = tu.ReadGetJSON(re, testDialClient, url, &resp)
-        suite.compareRule(&resp, &testCase.rule)
+        tu.Eventually(suite.Require(), func() bool {
+            err = tu.ReadGetJSON(re, testDialClient, url, &resp)
+            return suite.compareRule(&resp, &testCase.rule)
+        })
     } else {
         err = tu.CheckGetJSON(testDialClient, url, nil, tu.Status(re, testCase.code))
     }
@@ -421,13 +423,17 @@ func (suite *ruleTestSuite) checkGetAllByGroup(cluster *tests.TestCluster) {
     suite.T().Log(testCase.name)
     var resp []*placement.Rule
     url := fmt.Sprintf("%s/rules/group/%s", urlPrefix, testCase.groupID)
-    err = tu.ReadGetJSON(re, testDialClient, url, &resp)
-    suite.NoError(err)
-    suite.Len(resp, testCase.count)
-    if testCase.count == 2 {
-        suite.compareRule(resp[0], &rule)
-        suite.compareRule(resp[1], &rule1)
-    }
+    tu.Eventually(re, func() bool {
+        err = tu.ReadGetJSON(re, testDialClient, url, &resp)
+        suite.NoError(err)
+        if len(resp) != testCase.count {
+            return false
+        }
+        if testCase.count == 2 {
+            return suite.compareRule(resp[0], &rule) && suite.compareRule(resp[1], &rule1)
+        }
+        return true
+    })
 }
 }
 
@@ -487,12 +493,15 @@ func (suite *ruleTestSuite) checkGetAllByRegion(cluster *tests.TestCluster) {
     url := fmt.Sprintf("%s/rules/region/%s", urlPrefix, testCase.regionID)
 
     if testCase.success {
-        err = tu.ReadGetJSON(re, testDialClient, url, &resp)
-        for _, r := range resp {
-            if r.GroupID == "e" {
-                suite.compareRule(r, &rule)
+        tu.Eventually(suite.Require(), func() bool {
+            err = tu.ReadGetJSON(re, testDialClient, url, &resp)
+            for _, r := range resp {
+                if r.GroupID == "e" {
+                    return suite.compareRule(r, &rule)
+                }
             }
-        }
+            return true
+        })
     } else {
         err = tu.CheckGetJSON(testDialClient, url, nil, tu.Status(re, testCase.code))
     }
@@ -956,22 +965,26 @@ func (suite *ruleTestSuite) checkBundleBadRequest(cluster *tests.TestCluster) {
 }
 
 func (suite *ruleTestSuite) compareBundle(b1, b2 placement.GroupBundle) {
-    suite.Equal(b2.ID, b1.ID)
-    suite.Equal(b2.Index, b1.Index)
-    suite.Equal(b2.Override, b1.Override)
-    suite.Len(b2.Rules, len(b1.Rules))
-    for i := range b1.Rules {
-        suite.compareRule(b1.Rules[i], b2.Rules[i])
-    }
+    tu.Eventually(suite.Require(), func() bool {
+        if b2.ID != b1.ID || b2.Index != b1.Index || b2.Override != b1.Override || len(b2.Rules) != len(b1.Rules) {
+            return false
+        }
+        for i := range b1.Rules {
+            if !suite.compareRule(b1.Rules[i], b2.Rules[i]) {
+                return false
+            }
+        }
+        return true
+    })
 }
 
-func (suite *ruleTestSuite) compareRule(r1 *placement.Rule, r2 *placement.Rule) {
-    suite.Equal(r2.GroupID, r1.GroupID)
-    suite.Equal(r2.ID, r1.ID)
-    suite.Equal(r2.StartKeyHex, r1.StartKeyHex)
-    suite.Equal(r2.EndKeyHex, r1.EndKeyHex)
-    suite.Equal(r2.Role, r1.Role)
-    suite.Equal(r2.Count, r1.Count)
+func (suite *ruleTestSuite) compareRule(r1 *placement.Rule, r2 *placement.Rule) bool {
+    return r2.GroupID == r1.GroupID &&
+        r2.ID == r1.ID &&
+        r2.StartKeyHex == r1.StartKeyHex &&
+        r2.EndKeyHex == r1.EndKeyHex &&
+        r2.Role == r1.Role &&
+        r2.Count == r1.Count
 }
 
 type regionRuleTestSuite struct {
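Note: `compareRule` changes from an assertion helper to a pure predicate, which is what lets `checkGet`, `checkGetAllByGroup`, and `compareBundle` run it inside a retry loop. The shape of the refactor in miniature, with a simplified stand-in type:

    // Before: each mismatch fails the test on the spot.
    //   func (s *suite) compareRule(got, want *Rule) { s.Equal(want.ID, got.ID) }
    // After: report equality and let the polling caller decide when to give up.
    func sameRule(got, want *placement.Rule) bool {
        return got.GroupID == want.GroupID && got.ID == want.ID && got.Count == want.Count
    }
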
diff --git a/tests/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go
index 38f691a4eda..4d6dde6f2b9 100644
--- a/tests/server/api/scheduler_test.go
+++ b/tests/server/api/scheduler_test.go
@@ -18,6 +18,7 @@ import (
     "encoding/json"
     "fmt"
     "net/http"
+    "reflect"
     "testing"
     "time"
 
@@ -447,18 +448,22 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) {
     suite.NoError(err)
     suite.NoError(tu.CheckPostJSON(testDialClient, updateURL, body, tu.StatusOK(re)))
     resp = make(map[string]interface{})
-    suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp))
-    exceptMap["4"] = []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}}
-    suite.Equal(exceptMap, resp["store-id-ranges"])
+    tu.Eventually(re, func() bool {
+        suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp))
+        exceptMap["4"] = []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}}
+        return reflect.DeepEqual(exceptMap, resp["store-id-ranges"])
+    })
 
     // using /pd/v1/schedule-config/evict-leader-scheduler/config to delete exist store from evict-leader-scheduler
     deleteURL := fmt.Sprintf("%s%s%s/%s/delete/%s", leaderAddr, apiPrefix, server.SchedulerConfigHandlerPath, name, "4")
     err = tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re))
     suite.NoError(err)
     resp = make(map[string]interface{})
-    suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp))
-    delete(exceptMap, "4")
-    suite.Equal(exceptMap, resp["store-id-ranges"])
+    tu.Eventually(re, func() bool {
+        suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp))
+        delete(exceptMap, "4")
+        return reflect.DeepEqual(exceptMap, resp["store-id-ranges"])
+    })
     err = tu.CheckDelete(testDialClient, deleteURL, tu.Status(re, http.StatusNotFound))
     suite.NoError(err)
 },
diff --git a/tests/server/config/config_test.go b/tests/server/config/config_test.go
index 8d8cf40e692..4a4a91f2661 100644
--- a/tests/server/config/config_test.go
+++ b/tests/server/config/config_test.go
@@ -20,6 +20,7 @@ import (
     "encoding/json"
     "fmt"
     "net/http"
+    "reflect"
     "testing"
     "time"
 
@@ -272,10 +273,11 @@ func (suite *configTestSuite) checkConfigReplication(cluster *tests.TestCluster)
     suite.NoError(err)
 
     rc4 := &sc.ReplicationConfig{}
-    err = tu.ReadGetJSON(re, testDialClient, addr, rc4)
-    suite.NoError(err)
-
-    suite.Equal(*rc4, *rc)
+    tu.Eventually(re, func() bool {
+        err = tu.ReadGetJSON(re, testDialClient, addr, rc4)
+        suite.NoError(err)
+        return reflect.DeepEqual(*rc4, *rc)
+    })
 }
 
 func (suite *configTestSuite) TestConfigLabelProperty() {
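Note: most hunks above lean on `testutil.Eventually` to paper over the async gap between the PD leader and the scheduling microservice. For reference, a self-contained sketch of what such a poller does; the 20s/100ms budget is an assumption for illustration, not taken from this diff:

    package main

    import (
        "fmt"
        "time"
    )

    // eventually re-checks cond every tick until it holds or timeout elapses.
    func eventually(cond func() bool, timeout, tick time.Duration) bool {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            if cond() {
                return true
            }
            time.Sleep(tick)
        }
        return false
    }

    func main() {
        start := time.Now()
        ok := eventually(func() bool {
            return time.Since(start) > 300*time.Millisecond // stand-in for "config synced"
        }, 20*time.Second, 100*time.Millisecond)
        fmt.Println("condition met:", ok)
    }
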