diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go
index a05cc3f3de5..b8b981edd9e 100644
--- a/pkg/schedule/schedulers/evict_leader.go
+++ b/pkg/schedule/schedulers/evict_leader.go
@@ -138,6 +138,12 @@ func (conf *evictLeaderSchedulerConfig) resetStoreLocked(id uint64, keyRange []c
 	conf.StoreIDWithRanges[id] = keyRange
 }
 
+func (conf *evictLeaderSchedulerConfig) resetStore(id uint64, keyRange []core.KeyRange) {
+	conf.Lock()
+	defer conf.Unlock()
+	conf.resetStoreLocked(id, keyRange)
+}
+
 func (conf *evictLeaderSchedulerConfig) getKeyRangesByID(id uint64) []core.KeyRange {
 	conf.RLock()
 	defer conf.RUnlock()
@@ -207,7 +213,9 @@ func (conf *evictLeaderSchedulerConfig) pauseLeaderTransferIfStoreNotExist(id ui
 func (conf *evictLeaderSchedulerConfig) update(id uint64, newRanges []core.KeyRange, batch int) error {
 	conf.Lock()
 	defer conf.Unlock()
-	conf.StoreIDWithRanges[id] = newRanges
+	if id != 0 {
+		conf.StoreIDWithRanges[id] = newRanges
+	}
 	conf.Batch = batch
 	err := conf.persistLocked()
 	if err != nil {
@@ -218,28 +226,34 @@ func (conf *evictLeaderSchedulerConfig) update(id uint64, newRanges []core.KeyRa
 
 func (conf *evictLeaderSchedulerConfig) delete(id uint64) (any, error) {
 	conf.Lock()
-	defer conf.Lock()
 	keyRanges := conf.StoreIDWithRanges[id]
 	succ, last := conf.removeStoreLocked(id)
 	var resp any
-	if succ {
-		err := conf.persistLocked()
-		if err != nil {
-			conf.resetStoreLocked(id, keyRanges)
-			return resp, err
-		}
-		if last {
-			if err := conf.removeSchedulerCb(EvictLeaderName); err != nil {
-				if !errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) {
-					conf.resetStoreLocked(id, keyRanges)
-				}
-				return resp, err
-			}
-			resp = lastStoreDeleteInfo
-		}
+
+	if !succ {
+		conf.Unlock()
+		return resp, errs.ErrScheduleConfigNotExist.FastGenByArgs()
+	}
+
+	err := conf.persistLocked()
+	if err != nil {
+		conf.resetStoreLocked(id, keyRanges)
+		conf.Unlock()
+		return resp, err
+	}
+	if !last {
+		conf.Unlock()
 		return resp, nil
 	}
-	return resp, errs.ErrScheduleConfigNotExist.FastGenByArgs()
+	conf.Unlock()
+	if err := conf.removeSchedulerCb(EvictLeaderName); err != nil {
+		if !errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) {
+			conf.resetStore(id, keyRanges)
+		}
+		return resp, err
+	}
+	resp = lastStoreDeleteInfo
+	return resp, nil
 }
 
 type evictLeaderScheduler struct {
@@ -428,12 +442,14 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R
 		}
 	}
 
-	batch, ok := input["batch"].(float64)
+	batch := handler.config.getBatch()
+	batchFloat, ok := input["batch"].(float64)
 	if ok {
-		if batch < 0 || batch > 10 {
-			handler.rd.JSON(w, http.StatusBadRequest, "batch is invalid, it should be in [0, 10")
+		if batchFloat < 0 || batchFloat > 10 {
+			handler.rd.JSON(w, http.StatusBadRequest, "batch is invalid, it should be in [0, 10]")
 			return
 		}
+		batch = (int)(batchFloat)
 	}
 
 	ranges, ok := (input["ranges"]).([]string)
@@ -446,15 +462,13 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R
 		ranges = handler.config.getRanges(id)
 	}
 
-	if len(ranges) != 0 {
-		newRanges, err = getKeyRanges(ranges)
-		if err != nil {
-			handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
-			return
-		}
+	newRanges, err = getKeyRanges(ranges)
+	if err != nil {
+		handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
+		return
 	}
 
-	err = handler.config.update(id, newRanges, int(batch))
+	err = handler.config.update(id, newRanges, batch)
 	if err != nil {
 		handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
 		return
@@ -477,7 +491,7 @@ func (handler *evictLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.R
 
 	resp, err := handler.config.delete(id)
 	if err != nil {
-		if errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) {
+		if errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) || errors.ErrorEqual(err, errs.ErrScheduleConfigNotExist.FastGenByArgs()) {
 			handler.rd.JSON(w, http.StatusNotFound, err.Error())
 		} else {
 			handler.rd.JSON(w, http.StatusInternalServerError, err.Error())
@@ -485,11 +499,7 @@ func (handler *evictLeaderHandler) DeleteConfig(w http.ResponseWriter, r *http.R
 		return
 	}
 
-	if resp != nil {
-		handler.rd.JSON(w, http.StatusOK, resp)
-		return
-	}
-	handler.rd.JSON(w, http.StatusNotFound, err.Error())
+	handler.rd.JSON(w, http.StatusOK, resp)
 }
 
 func newEvictLeaderHandler(config *evictLeaderSchedulerConfig) http.Handler {
diff --git a/pkg/schedule/schedulers/evict_leader_test.go b/pkg/schedule/schedulers/evict_leader_test.go
index 384783581c8..63f7cde3b15 100644
--- a/pkg/schedule/schedulers/evict_leader_test.go
+++ b/pkg/schedule/schedulers/evict_leader_test.go
@@ -89,17 +89,43 @@ func TestConfigClone(t *testing.T) {
 
 	emptyConf := &evictLeaderSchedulerConfig{StoreIDWithRanges: make(map[uint64][]core.KeyRange)}
 	con2 := emptyConf.Clone()
-	re.Empty(emptyConf.getKeyRangesByID(1))
-	re.NotEmpty(con2.getKeyRangesByID(1))
-	re.Empty(emptyConf.getKeyRangesByID(1))
+	re.Empty(con2.getKeyRangesByID(1))
+	con2.StoreIDWithRanges[1], _ = getKeyRanges([]string{"a", "b", "c", "d"})
 
 	con3 := con2.Clone()
-	con3.StoreIDWithRanges[1], _ = getKeyRanges([]string{"a", "b", "c", "d"})
-	re.Empty(emptyConf.getKeyRangesByID(1))
-	re.NotEqual(len(con3.getRanges(1)), len(con2.getRanges(1)))
+	re.Equal(len(con3.getRanges(1)), len(con2.getRanges(1)))
+	con3.StoreIDWithRanges[1][0].StartKey = []byte("aaa")
 
 	con4 := con3.Clone()
 	re.True(bytes.Equal(con4.StoreIDWithRanges[1][0].StartKey, con3.StoreIDWithRanges[1][0].StartKey))
-	con4.StoreIDWithRanges[1][0].StartKey = []byte("aaa")
-	re.False(bytes.Equal(con4.StoreIDWithRanges[1][0].StartKey, con3.StoreIDWithRanges[1][0].StartKey))
+
+	con4.Batch = 10
+	con5 := con4.Clone()
+	re.Equal(con5.getBatch(), con4.getBatch())
+}
+
+func TestBatchEvict(t *testing.T) {
+	re := require.New(t)
+	cancel, _, tc, oc := prepareSchedulersTest()
+	defer cancel()
+
+	// Add stores 1, 2, 3
+	tc.AddLeaderStore(1, 0)
+	tc.AddLeaderStore(2, 0)
+	tc.AddLeaderStore(3, 0)
+	// the random might be the same, so we add 1000 regions to make sure the batch is full
+	for i := 1; i <= 1000; i++ {
+		tc.AddLeaderRegion(uint64(i), 1, 2, 3)
+	}
+	tc.AddLeaderRegion(6, 2, 1, 3)
+	tc.AddLeaderRegion(7, 3, 1, 2)
+
+	sl, err := CreateScheduler(EvictLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(EvictLeaderType, []string{"1"}), func(string) error { return nil })
+	re.NoError(err)
+	re.True(sl.IsScheduleAllowed(tc))
+	ops, _ := sl.Schedule(tc, false)
+	re.Len(ops, 3)
+	sl.(*evictLeaderScheduler).conf.Batch = 5
+	ops, _ = sl.Schedule(tc, false)
+	re.Len(ops, 5)
 }
diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go
index 6bca686404d..777c8b3d625 100644
--- a/pkg/schedule/schedulers/init.go
+++ b/pkg/schedule/schedulers/init.go
@@ -137,6 +137,7 @@ func schedulersRegister() {
 				return err
 			}
 			conf.StoreIDWithRanges[id] = ranges
+			conf.Batch = EvictLeaderBatchSize
 			return nil
 		}
 	})
diff --git a/tools/pd-ctl/tests/scheduler/scheduler_test.go b/tools/pd-ctl/tests/scheduler/scheduler_test.go
index 96a1f5557f9..488b5283612 100644
--- a/tools/pd-ctl/tests/scheduler/scheduler_test.go
+++ b/tools/pd-ctl/tests/scheduler/scheduler_test.go
@@ -140,7 +140,7 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *pdTests.TestCluster) {
 		testutil.Eventually(re, func() bool {
 			configInfo := make(map[string]any)
 			mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", schedulerName}, &configInfo)
-			return reflect.DeepEqual(expectedConfig, configInfo)
+			return reflect.DeepEqual(expectedConfig["store-id-ranges"], configInfo["store-id-ranges"])
 		})
 	}
 
@@ -530,6 +530,27 @@ func (suite *schedulerTestSuite) checkSchedulerConfig(cluster *pdTests.TestClust
 		return !strings.Contains(echo, "shuffle-hot-region-scheduler")
 	})
 
+	// test evict leader scheduler
+	echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "add", "evict-leader-scheduler", "1"}, nil)
+	re.Contains(echo, "Success!")
+	testutil.Eventually(re, func() bool {
+		echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "show"}, nil)
+		return strings.Contains(echo, "evict-leader-scheduler")
+	})
+	echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "evict-leader-scheduler", "set", "batch", "5"}, nil)
+	re.Contains(echo, "Success!")
+	conf = make(map[string]any)
+	testutil.Eventually(re, func() bool {
+		mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "evict-leader-scheduler"}, &conf)
+		return conf["batch"] == 5.
+	})
+	echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "remove", "evict-leader-scheduler-1"}, nil)
+	re.Contains(echo, "Success!")
+	testutil.Eventually(re, func() bool {
+		echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "show"}, nil)
+		return !strings.Contains(echo, "evict-leader-scheduler")
+	})
+
 	// test balance leader config
 	conf = make(map[string]any)
 	conf1 := make(map[string]any)
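
Note on the reworked delete above: every early return now releases the mutex explicitly, the removal callback runs with the lock dropped (presumably because the callback may need to take the same lock again), and the rollback after a failed callback goes through the new locking wrapper resetStore rather than resetStoreLocked. The standalone sketch below illustrates that pattern only; the conf type, its fields, and deleteStore/removeCb are hypothetical stand-ins, not PD code.

package main

import (
	"fmt"
	"sync"
)

// conf is a hypothetical, simplified stand-in for evictLeaderSchedulerConfig.
type conf struct {
	sync.RWMutex
	stores map[uint64][]string // store ID -> key ranges (simplified)
}

func (c *conf) removeStoreLocked(id uint64) (succ, last bool) {
	if _, ok := c.stores[id]; !ok {
		return false, false
	}
	delete(c.stores, id)
	return true, len(c.stores) == 0
}

// resetStore is the locking wrapper: it can be called while the mutex is free.
func (c *conf) resetStore(id uint64, ranges []string) {
	c.Lock()
	defer c.Unlock()
	c.stores[id] = ranges
}

// deleteStore mirrors the control flow of the patched delete: explicit Unlock
// on every early return, and the callback invoked only after the lock is dropped.
func (c *conf) deleteStore(id uint64, removeCb func() error) error {
	c.Lock()
	ranges := c.stores[id]
	succ, last := c.removeStoreLocked(id)
	if !succ {
		c.Unlock()
		return fmt.Errorf("store %d not found", id)
	}
	if !last {
		c.Unlock()
		return nil
	}
	c.Unlock() // drop the lock so the callback may safely re-acquire it
	if err := removeCb(); err != nil {
		c.resetStore(id, ranges) // roll back through the locking wrapper
		return err
	}
	return nil
}

func main() {
	c := &conf{stores: map[uint64][]string{1: {"a", "b"}}}
	// The callback can read the config because the mutex is no longer held here.
	err := c.deleteStore(1, func() error { c.RLock(); defer c.RUnlock(); return nil })
	fmt.Println(err, c.stores)
}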