From ede41125219aa7890a95d72d09b782e08c1c8307 Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Fri, 2 Aug 2024 17:03:15 +0800 Subject: [PATCH 01/12] save work Signed-off-by: okJiang <819421878@qq.com> --- pkg/mcs/scheduling/server/cluster.go | 5 +- pkg/mock/mockconfig/mockconfig.go | 12 +- pkg/schedule/config/config_provider.go | 6 +- pkg/schedule/coordinator.go | 32 +++-- .../schedulers/hot_region_rank_v2_test.go | 12 +- pkg/schedule/schedulers/hot_region_test.go | 64 +++++----- pkg/schedule/schedulers/init.go | 86 +++++++------ pkg/schedule/schedulers/scheduler.go | 30 +++-- pkg/schedule/schedulers/scheduler_test.go | 4 +- pkg/schedule/type/type.go | 114 +++++++++++------- plugin/scheduler_example/evict_leader.go | 4 +- server/handler.go | 4 +- 12 files changed, 225 insertions(+), 148 deletions(-) diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index 955af4b9b4a..e933643a237 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -309,11 +309,12 @@ func (c *Cluster) updateScheduler() { ) // Create the newly added schedulers. for _, scheduler := range latestSchedulersConfig { + schedulerType := types.SchedulerTypeCompatibleMapReverse[scheduler.Type] s, err := schedulers.CreateScheduler( - scheduler.Type, + schedulerType, c.coordinator.GetOperatorController(), c.storage, - schedulers.ConfigSliceDecoder(scheduler.Type, scheduler.Args), + schedulers.ConfigSliceDecoder(schedulerType, scheduler.Args), schedulersController.RemoveScheduler, ) if err != nil { diff --git a/pkg/mock/mockconfig/mockconfig.go b/pkg/mock/mockconfig/mockconfig.go index 0516f9cd467..f8ccbf02066 100644 --- a/pkg/mock/mockconfig/mockconfig.go +++ b/pkg/mock/mockconfig/mockconfig.go @@ -16,14 +16,24 @@ package mockconfig import ( sc "github.com/tikv/pd/pkg/schedule/config" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/server/config" ) +// oldName2NewType is used to convert the old scheduler name to the new scheduler type. +// It is only used for testing. +var oldName2NewType = map[string]types.CheckerSchedulerType{ + "balance-leader": types.BalanceLeaderScheduler, + "balance-region": types.BalanceRegionScheduler, + "hot-region": types.BalanceHotRegionScheduler, + "evict-slow-store": types.EvictSlowStoreScheduler, +} + // NewTestOptions creates default options for testing. func NewTestOptions() *config.PersistOptions { // register default schedulers in case config check fail. for _, d := range sc.DefaultSchedulers { - sc.RegisterScheduler(d.Type) + sc.RegisterScheduler(oldName2NewType[d.Type]) } c := config.NewConfig() c.Adjust(nil, false) diff --git a/pkg/schedule/config/config_provider.go b/pkg/schedule/config/config_provider.go index 51ade0edb77..d7bc38a7c03 100644 --- a/pkg/schedule/config/config_provider.go +++ b/pkg/schedule/config/config_provider.go @@ -33,13 +33,13 @@ const RejectLeader = "reject-leader" var schedulerMap sync.Map // RegisterScheduler registers the scheduler type. -func RegisterScheduler(typ string) { +func RegisterScheduler(typ types.CheckerSchedulerType) { schedulerMap.Store(typ, struct{}{}) } // IsSchedulerRegistered checks if the named scheduler type is registered.
-func IsSchedulerRegistered(name string) bool { - _, ok := schedulerMap.Load(name) +func IsSchedulerRegistered(typ types.CheckerSchedulerType) bool { + _, ok := schedulerMap.Load(typ) return ok } diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go index cb935801ce2..82beb91d9b3 100644 --- a/pkg/schedule/coordinator.go +++ b/pkg/schedule/coordinator.go @@ -34,6 +34,7 @@ import ( "github.com/tikv/pd/pkg/schedule/scatter" "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/schedule/splitter" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/logutil" @@ -187,7 +188,7 @@ func (c *Coordinator) driveSlowNodeScheduler() { // If the cluster was set up with `raft-kv2` engine, this cluster should // enable `evict-slow-trend` scheduler as default. if c.GetCluster().GetStoreConfig().IsRaftKV2() { - typ := schedulers.EvictSlowTrendType + typ := types.EvictSlowTrendScheduler args := []string{} s, err := schedulers.CreateScheduler(typ, c.opController, c.cluster.GetStorage(), schedulers.ConfigSliceDecoder(typ, args), c.schedulers.RemoveScheduler) @@ -288,7 +289,8 @@ func (c *Coordinator) InitSchedulers(needRun bool) { log.Info("skip create scheduler with independent configuration", zap.String("scheduler-name", name), zap.String("scheduler-type", cfg.Type), zap.Strings("scheduler-args", cfg.Args)) continue } - s, err := schedulers.CreateScheduler(cfg.Type, c.opController, c.cluster.GetStorage(), schedulers.ConfigJSONDecoder([]byte(data)), c.schedulers.RemoveScheduler) + s, err := schedulers.CreateScheduler(types.SchedulerStr2Type[cfg.Type], c.opController, + c.cluster.GetStorage(), schedulers.ConfigJSONDecoder([]byte(data)), c.schedulers.RemoveScheduler) if err != nil { log.Error("can not create scheduler with independent configuration", zap.String("scheduler-name", name), zap.Strings("scheduler-args", cfg.Args), errs.ZapError(err)) continue @@ -296,12 +298,14 @@ func (c *Coordinator) InitSchedulers(needRun bool) { if needRun { log.Info("create scheduler with independent configuration", zap.String("scheduler-name", s.GetName())) if err = c.schedulers.AddScheduler(s); err != nil { - log.Error("can not add scheduler with independent configuration", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", cfg.Args), errs.ZapError(err)) + log.Error("can not add scheduler with independent configuration", + zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", cfg.Args), errs.ZapError(err)) } } else { log.Info("create scheduler handler with independent configuration", zap.String("scheduler-name", s.GetName())) if err = c.schedulers.AddSchedulerHandler(s); err != nil { - log.Error("can not add scheduler handler with independent configuration", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", cfg.Args), errs.ZapError(err)) + log.Error("can not add scheduler handler with independent configuration", + zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", cfg.Args), errs.ZapError(err)) } } } @@ -316,16 +320,22 @@ func (c *Coordinator) InitSchedulers(needRun bool) { continue } - s, err := schedulers.CreateScheduler(schedulerCfg.Type, c.opController, c.cluster.GetStorage(), schedulers.ConfigSliceDecoder(schedulerCfg.Type, schedulerCfg.Args), c.schedulers.RemoveScheduler) + tp := types.SchedulerStr2Type[schedulerCfg.Type] + s, err := schedulers.CreateScheduler(tp, c.opController, + c.cluster.GetStorage(), 
schedulers.ConfigSliceDecoder(tp, schedulerCfg.Args), c.schedulers.RemoveScheduler) if err != nil { - log.Error("can not create scheduler", zap.String("scheduler-type", schedulerCfg.Type), zap.Strings("scheduler-args", schedulerCfg.Args), errs.ZapError(err)) + log.Error("can not create scheduler", zap.Stringer("scheduler-type", tp), + zap.Strings("scheduler-args", schedulerCfg.Args), errs.ZapError(err)) continue } if needRun { - log.Info("create scheduler", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", schedulerCfg.Args)) - if err = c.schedulers.AddScheduler(s, schedulerCfg.Args...); err != nil && !errors.ErrorEqual(err, errs.ErrSchedulerExisted.FastGenByArgs()) { - log.Error("can not add scheduler", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", schedulerCfg.Args), errs.ZapError(err)) + log.Info("create scheduler", zap.String("scheduler-name", s.GetName()), + zap.Strings("scheduler-args", schedulerCfg.Args)) + if err = c.schedulers.AddScheduler(s, schedulerCfg.Args...); err != nil && + !errors.ErrorEqual(err, errs.ErrSchedulerExisted.FastGenByArgs()) { + log.Error("can not add scheduler", zap.String("scheduler-name", + s.GetName()), zap.Strings("scheduler-args", schedulerCfg.Args), errs.ZapError(err)) } else { // Only records the valid scheduler config. scheduleCfg.Schedulers[k] = schedulerCfg @@ -362,7 +372,7 @@ func (c *Coordinator) LoadPlugin(pluginPath string, ch chan string) { log.Error("GetFunction SchedulerType error", errs.ZapError(err)) return } - schedulerType := SchedulerType.(func() string) + schedulerType := SchedulerType.(func() types.CheckerSchedulerType) // get func: SchedulerArgs from plugin SchedulerArgs, err := c.pluginInterface.GetFunction(pluginPath, "SchedulerArgs") if err != nil { @@ -373,7 +383,7 @@ func (c *Coordinator) LoadPlugin(pluginPath string, ch chan string) { // create and add user scheduler s, err := schedulers.CreateScheduler(schedulerType(), c.opController, c.cluster.GetStorage(), schedulers.ConfigSliceDecoder(schedulerType(), schedulerArgs()), c.schedulers.RemoveScheduler) if err != nil { - log.Error("can not create scheduler", zap.String("scheduler-type", schedulerType()), errs.ZapError(err)) + log.Error("can not create scheduler", zap.Stringer("scheduler-type", schedulerType()), errs.ZapError(err)) return } log.Info("create scheduler", zap.String("scheduler-name", s.GetName())) diff --git a/pkg/schedule/schedulers/hot_region_rank_v2_test.go b/pkg/schedule/schedulers/hot_region_rank_v2_test.go index 0237c2156ec..8bd029d4857 100644 --- a/pkg/schedule/schedulers/hot_region_rank_v2_test.go +++ b/pkg/schedule/schedulers/hot_region_rank_v2_test.go @@ -32,7 +32,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) hb.types = []resourceType{writePeer} @@ -93,7 +93,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) 
hb.types = []resourceType{writePeer} @@ -145,7 +145,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) hb.types = []resourceType{writePeer} @@ -206,7 +206,7 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sche, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) hb.conf.SetDstToleranceRatio(0.0) @@ -265,7 +265,7 @@ func TestSkipUniformStore(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + hb, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) @@ -419,7 +419,7 @@ func checkHotReadRegionScheduleWithSmallHotRegion(re *require.Assertions, highLo addOtherRegions func(*mockcluster.Cluster, *hotScheduler)) []*operator.Operator { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sche, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) hb.conf.SetSrcToleranceRatio(1) diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 3b563106dc0..5eb73eaf4a9 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -29,6 +29,7 @@ import ( "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/statistics/utils" @@ -40,15 +41,20 @@ import ( "github.com/tikv/pd/pkg/versioninfo" ) +var ( + writeType = types.CheckerSchedulerType(utils.Write.String()) + readType = types.CheckerSchedulerType(utils.Read.String()) +) + func init() { // disable denoising in test. 
statistics.Denoising = false statisticsInterval = 0 - RegisterScheduler(utils.Write.String(), func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(writeType, func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { cfg := initHotRegionScheduleConfig() return newHotWriteScheduler(opController, cfg), nil }) - RegisterScheduler(utils.Read.String(), func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(readType, func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { return newHotReadScheduler(opController, initHotRegionScheduleConfig()), nil }) } @@ -203,7 +209,7 @@ func TestSplitIfRegionTooHot(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) b := &metapb.Buckets{ RegionId: 1, @@ -258,7 +264,7 @@ func TestSplitIfRegionTooHot(t *testing.T) { addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 4 * units.MiB, 0, 0}, }) - hb, _ = CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, _ = CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) hb.(*hotScheduler).types = []resourceType{writePeer} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) @@ -277,7 +283,7 @@ func TestSplitBucketsBySize(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() tc.SetRegionBucketEnabled(true) defer cancel() - hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) solve := newBalanceSolver(hb.(*hotScheduler), tc, utils.Read, transferLeader) solve.cur = &solution{} @@ -328,7 +334,7 @@ func TestSplitBucketsByLoad(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() tc.SetRegionBucketEnabled(true) defer cancel() - hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) solve := newBalanceSolver(hb.(*hotScheduler), tc, utils.Read, transferLeader) solve.cur = &solution{} @@ -397,7 +403,7 @@ func checkHotWriteRegionPlacement(re *require.Assertions, enablePlacementRules b tc.SetEnablePlacementRules(enablePlacementRules) labels := []string{"zone", "host"} tc.SetMaxReplicasWithLabel(enablePlacementRules, 3, labels...) - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} hb.(*hotScheduler).conf.SetHistorySampleDuration(0) @@ -454,7 +460,7 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace tc.SetMaxReplicasWithLabel(enablePlacementRules, 3, labels...) 
tc.SetHotRegionCacheHitsThreshold(0) - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetHistorySampleDuration(0) hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.BytePriority, utils.KeyPriority} @@ -649,7 +655,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { }, }, })) - sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + sche, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb := sche.(*hotScheduler) hb.conf.SetHistorySampleDuration(0) @@ -846,7 +852,7 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) @@ -881,7 +887,7 @@ func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} hb.(*hotScheduler).conf.SetDstToleranceRatio(1) @@ -939,7 +945,7 @@ func TestHotWriteRegionScheduleUnhealthyStore(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) @@ -984,7 +990,7 @@ func TestHotWriteRegionScheduleCheckHot(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) @@ -1016,7 +1022,7 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) hb.(*hotScheduler).types = []resourceType{writeLeader} hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.SetHistorySampleDuration(0) @@ -1082,7 +1088,7 @@ func checkHotWriteRegionScheduleWithPendingInfluence(re *require.Assertions, dim pendingAmpFactor = 0.0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.RankFormulaVersion = "v1" 
hb.(*hotScheduler).conf.SetHistorySampleDuration(0) @@ -1166,7 +1172,7 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetEnablePlacementRules(true) - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.SetHistorySampleDuration(0) @@ -1246,7 +1252,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - scheduler, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + scheduler, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb := scheduler.(*hotScheduler) hb.conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} @@ -1368,7 +1374,7 @@ func TestHotReadRegionScheduleWithQuery(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) @@ -1401,7 +1407,7 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.RankFormulaVersion = "v1" hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) @@ -1461,7 +1467,7 @@ func TestHotReadRegionScheduleWithPendingInfluence(t *testing.T) { func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim int) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) // For test hb.(*hotScheduler).conf.RankFormulaVersion = "v1" @@ -1573,7 +1579,7 @@ func TestHotReadWithEvictLeaderScheduler(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) @@ -2039,7 +2045,7 @@ func TestInfluenceByRWType(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} hb.(*hotScheduler).conf.SetDstToleranceRatio(1) @@ -2139,7 +2145,7 @@ func checkHotReadPeerSchedule(re *require.Assertions, enablePlacementRules bool) tc.PutStoreWithLabels(id) } - sche, err := 
CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) + sche, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) re.NoError(err) hb := sche.(*hotScheduler) hb.conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} @@ -2159,7 +2165,7 @@ func TestHotScheduleWithPriority(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} hb.(*hotScheduler).conf.SetDstToleranceRatio(1.05) @@ -2197,7 +2203,7 @@ func TestHotScheduleWithPriority(t *testing.T) { clearPendingInfluence(hb.(*hotScheduler)) // assert read priority schedule - hb, err = CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err = CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) tc.UpdateStorageReadStats(5, 10*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) tc.UpdateStorageReadStats(4, 10*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) @@ -2218,7 +2224,7 @@ func TestHotScheduleWithPriority(t *testing.T) { re.Len(ops, 1) operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3) - hb, err = CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err = CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) hb.(*hotScheduler).types = []resourceType{writePeer} hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.RankFormulaVersion = "v1" @@ -2261,7 +2267,7 @@ func TestHotScheduleWithStddev(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writePeer} hb.(*hotScheduler).conf.SetDstToleranceRatio(1.0) @@ -2320,7 +2326,7 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) { pendingAmpFactor = 0.0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).types = []resourceType{writeLeader} hb.(*hotScheduler).conf.SetDstToleranceRatio(1) @@ -2356,7 +2362,7 @@ func TestCompatibility(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(writeType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) // default checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go index 988bbc30475..f8939bf02f7 100644 --- a/pkg/schedule/schedulers/init.go +++ b/pkg/schedule/schedulers/init.go @@ -22,6 +22,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" 
"github.com/tikv/pd/pkg/schedule/operator" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage/endpoint" ) @@ -36,7 +37,7 @@ func Register() { func schedulersRegister() { // balance leader - RegisterSliceDecoderBuilder(BalanceLeaderType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.BalanceLeaderScheduler, func(args []string) ConfigDecoder { return func(v any) error { conf, ok := v.(*balanceLeaderSchedulerConfig) if !ok { @@ -52,7 +53,8 @@ func schedulersRegister() { } }) - RegisterScheduler(BalanceLeaderType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.BalanceLeaderScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceLeaderSchedulerConfig{storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -64,7 +66,7 @@ func schedulersRegister() { }) // balance region - RegisterSliceDecoderBuilder(BalanceRegionType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.BalanceRegionScheduler, func(args []string) ConfigDecoder { return func(v any) error { conf, ok := v.(*balanceRegionSchedulerConfig) if !ok { @@ -79,7 +81,8 @@ func schedulersRegister() { } }) - RegisterScheduler(BalanceRegionType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.BalanceRegionScheduler, func(opController *operator.Controller, + _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceRegionSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -88,7 +91,7 @@ func schedulersRegister() { }) // balance witness - RegisterSliceDecoderBuilder(BalanceWitnessType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.BalanceWitnessScheduler, func(args []string) ConfigDecoder { return func(v any) error { conf, ok := v.(*balanceWitnessSchedulerConfig) if !ok { @@ -104,7 +107,8 @@ func schedulersRegister() { } }) - RegisterScheduler(BalanceWitnessType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.BalanceWitnessScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceWitnessSchedulerConfig{storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -116,7 +120,7 @@ func schedulersRegister() { }) // evict leader - RegisterSliceDecoderBuilder(EvictLeaderType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.EvictLeaderScheduler, func(args []string) ConfigDecoder { return func(v any) error { if len(args) != 1 { return errs.ErrSchedulerConfig.FastGenByArgs("id") @@ -141,7 +145,8 @@ func schedulersRegister() { } }) - RegisterScheduler(EvictLeaderType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.EvictLeaderScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { conf := 
&evictLeaderSchedulerConfig{StoreIDWithRanges: make(map[uint64][]core.KeyRange), storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -152,13 +157,14 @@ func schedulersRegister() { }) // evict slow store - RegisterSliceDecoderBuilder(EvictSlowStoreType, func([]string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.EvictSlowStoreScheduler, func([]string) ConfigDecoder { return func(any) error { return nil } }) - RegisterScheduler(EvictSlowStoreType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.EvictSlowStoreScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initEvictSlowStoreSchedulerConfig(storage) if err := decoder(conf); err != nil { return nil, err @@ -168,7 +174,7 @@ func schedulersRegister() { }) // grant hot region - RegisterSliceDecoderBuilder(GrantHotRegionType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.GrantHotRegionScheduler, func(args []string) ConfigDecoder { return func(v any) error { if len(args) != 2 { return errs.ErrSchedulerConfig.FastGenByArgs("id") @@ -198,7 +204,8 @@ func schedulersRegister() { } }) - RegisterScheduler(GrantHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.GrantHotRegionScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &grantHotRegionSchedulerConfig{StoreIDs: make([]uint64, 0), storage: storage} conf.cluster = opController.GetCluster() if err := decoder(conf); err != nil { @@ -208,13 +215,14 @@ func schedulersRegister() { }) // hot region - RegisterSliceDecoderBuilder(HotRegionType, func([]string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.BalanceHotRegionScheduler, func([]string) ConfigDecoder { return func(any) error { return nil } }) - RegisterScheduler(HotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.BalanceHotRegionScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initHotRegionScheduleConfig() var data map[string]any if err := decoder(&data); err != nil { @@ -235,7 +243,7 @@ func schedulersRegister() { }) // grant leader - RegisterSliceDecoderBuilder(GrantLeaderType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.GrantLeaderScheduler, func(args []string) ConfigDecoder { return func(v any) error { if len(args) != 1 { return errs.ErrSchedulerConfig.FastGenByArgs("id") @@ -259,7 +267,8 @@ func schedulersRegister() { } }) - RegisterScheduler(GrantLeaderType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.GrantLeaderScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { conf := &grantLeaderSchedulerConfig{StoreIDWithRanges: make(map[uint64][]core.KeyRange), storage: storage} conf.cluster = 
opController.GetCluster() conf.removeSchedulerCb = removeSchedulerCb[0] @@ -270,7 +279,7 @@ func schedulersRegister() { }) // label - RegisterSliceDecoderBuilder(LabelType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.LabelScheduler, func(args []string) ConfigDecoder { return func(v any) error { conf, ok := v.(*labelSchedulerConfig) if !ok { @@ -285,7 +294,8 @@ func schedulersRegister() { } }) - RegisterScheduler(LabelType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.LabelScheduler, func(opController *operator.Controller, + _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &labelSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -294,7 +304,7 @@ func schedulersRegister() { }) // random merge - RegisterSliceDecoderBuilder(RandomMergeType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.RandomMergeScheduler, func(args []string) ConfigDecoder { return func(v any) error { conf, ok := v.(*randomMergeSchedulerConfig) if !ok { @@ -309,7 +319,8 @@ func schedulersRegister() { } }) - RegisterScheduler(RandomMergeType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.RandomMergeScheduler, func(opController *operator.Controller, + _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &randomMergeSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -319,7 +330,7 @@ func schedulersRegister() { // scatter range // args: [start-key, end-key, range-name]. 
- RegisterSliceDecoderBuilder(ScatterRangeType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.ScatterRangeScheduler, func(args []string) ConfigDecoder { return func(v any) error { if len(args) != 3 { return errs.ErrSchedulerConfig.FastGenByArgs("ranges and name") @@ -338,7 +349,8 @@ func schedulersRegister() { } }) - RegisterScheduler(ScatterRangeType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.ScatterRangeScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &scatterRangeSchedulerConfig{ storage: storage, } @@ -353,7 +365,7 @@ func schedulersRegister() { }) // shuffle hot region - RegisterSliceDecoderBuilder(ShuffleHotRegionType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.ShuffleHotRegionScheduler, func(args []string) ConfigDecoder { return func(v any) error { conf, ok := v.(*shuffleHotRegionSchedulerConfig) if !ok { @@ -371,7 +383,8 @@ func schedulersRegister() { } }) - RegisterScheduler(ShuffleHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.ShuffleHotRegionScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &shuffleHotRegionSchedulerConfig{Limit: uint64(1)} if err := decoder(conf); err != nil { return nil, err @@ -381,7 +394,7 @@ func schedulersRegister() { }) // shuffle leader - RegisterSliceDecoderBuilder(ShuffleLeaderType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.ShuffleLeaderScheduler, func(args []string) ConfigDecoder { return func(v any) error { conf, ok := v.(*shuffleLeaderSchedulerConfig) if !ok { @@ -397,7 +410,8 @@ func schedulersRegister() { } }) - RegisterScheduler(ShuffleLeaderType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.ShuffleLeaderScheduler, func(opController *operator.Controller, + _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &shuffleLeaderSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -406,7 +420,7 @@ func schedulersRegister() { }) // shuffle region - RegisterSliceDecoderBuilder(ShuffleRegionType, func(args []string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.ShuffleRegionScheduler, func(args []string) ConfigDecoder { return func(v any) error { conf, ok := v.(*shuffleRegionSchedulerConfig) if !ok { @@ -422,7 +436,8 @@ func schedulersRegister() { } }) - RegisterScheduler(ShuffleRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.ShuffleRegionScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &shuffleRegionSchedulerConfig{storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -431,13 +446,14 @@ func schedulersRegister() { }) // split bucket - RegisterSliceDecoderBuilder(SplitBucketType, func([]string) ConfigDecoder { + 
RegisterSliceDecoderBuilder(types.SplitBucketScheduler, func([]string) ConfigDecoder { return func(any) error { return nil } }) - RegisterScheduler(SplitBucketType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.SplitBucketScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initSplitBucketConfig() if err := decoder(conf); err != nil { return nil, err @@ -447,24 +463,26 @@ }) // transfer witness leader - RegisterSliceDecoderBuilder(TransferWitnessLeaderType, func([]string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.TransferWitnessLeaderScheduler, func([]string) ConfigDecoder { return func(any) error { return nil } }) - RegisterScheduler(TransferWitnessLeaderType, func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.TransferWitnessLeaderScheduler, func(opController *operator.Controller, + _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { return newTransferWitnessLeaderScheduler(opController), nil }) // evict slow store by trend - RegisterSliceDecoderBuilder(EvictSlowTrendType, func([]string) ConfigDecoder { + RegisterSliceDecoderBuilder(types.EvictSlowTrendScheduler, func([]string) ConfigDecoder { return func(any) error { return nil } }) - RegisterScheduler(EvictSlowTrendType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + RegisterScheduler(types.EvictSlowTrendScheduler, func(opController *operator.Controller, + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initEvictSlowTrendSchedulerConfig(storage) if err := decoder(conf); err != nil { return nil, err diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go index 894544d9617..dde25789c61 100644 --- a/pkg/schedule/schedulers/scheduler.go +++ b/pkg/schedule/schedulers/scheduler.go @@ -99,11 +99,11 @@ func ConfigJSONDecoder(data []byte) ConfigDecoder { } // ConfigSliceDecoder the default decode for the config. -func ConfigSliceDecoder(name string, args []string) ConfigDecoder { - builder, ok := schedulerArgsToDecoder[name] +func ConfigSliceDecoder(typ types.CheckerSchedulerType, args []string) ConfigDecoder { + builder, ok := schedulerArgsToDecoder[typ] if !ok { return func(any) error { - return errors.Errorf("the config decoder do not register for %s", name) + return errors.Errorf("the config decoder is not registered for %v", typ) } } return builder(args) @@ -113,31 +113,37 @@ type CreateSchedulerFunc func(opController *operator.Controller, storage endpoint.ConfigStorage, dec ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) var ( - schedulerMap = make(map[string]CreateSchedulerFunc) - schedulerArgsToDecoder = make(map[string]ConfigSliceDecoderBuilder) + schedulerMap = make(map[types.CheckerSchedulerType]CreateSchedulerFunc) + schedulerArgsToDecoder = make(map[types.CheckerSchedulerType]ConfigSliceDecoderBuilder) ) // RegisterScheduler binds a scheduler creator. It should be called in init() // func of a package.
-func RegisterScheduler(typ string, createFn CreateSchedulerFunc) { +func RegisterScheduler(typ types.CheckerSchedulerType, createFn CreateSchedulerFunc) { if _, ok := schedulerMap[typ]; ok { - log.Fatal("duplicated scheduler", zap.String("type", typ), errs.ZapError(errs.ErrSchedulerDuplicated)) + log.Fatal("duplicated scheduler", zap.Stringer("type", typ), errs.ZapError(errs.ErrSchedulerDuplicated)) } schedulerMap[typ] = createFn } // RegisterSliceDecoderBuilder convert arguments to config. It should be called in init() // func of package. -func RegisterSliceDecoderBuilder(typ string, builder ConfigSliceDecoderBuilder) { +func RegisterSliceDecoderBuilder(typ types.CheckerSchedulerType, builder ConfigSliceDecoderBuilder) { if _, ok := schedulerArgsToDecoder[typ]; ok { - log.Fatal("duplicated scheduler", zap.String("type", typ), errs.ZapError(errs.ErrSchedulerDuplicated)) + log.Fatal("duplicated scheduler", zap.Stringer("type", typ), errs.ZapError(errs.ErrSchedulerDuplicated)) } schedulerArgsToDecoder[typ] = builder config.RegisterScheduler(typ) } // CreateScheduler creates a scheduler with registered creator func. -func CreateScheduler(typ string, oc *operator.Controller, storage endpoint.ConfigStorage, dec ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { +func CreateScheduler( + typ types.CheckerSchedulerType, + oc *operator.Controller, + storage endpoint.ConfigStorage, + dec ConfigDecoder, + removeSchedulerCb ...func(string) error, +) (Scheduler, error) { fn, ok := schedulerMap[typ] if !ok { return nil, errs.ErrSchedulerCreateFuncNotRegistered.FastGenByArgs(typ) @@ -159,9 +165,9 @@ func SaveSchedulerConfig(storage endpoint.ConfigStorage, s Scheduler) error { func FindSchedulerTypeByName(name string) string { var typ string for registeredType := range schedulerMap { - if strings.Contains(name, registeredType) { + if strings.Contains(name, registeredType.String()) { if len(registeredType) > len(typ) { - typ = registeredType + typ = registeredType.String() } } } diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index 48040841c76..e6e8d192a24 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -211,7 +211,7 @@ func TestHotRegionScheduleAbnormalReplica(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetHotRegionScheduleLimit(0) - hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(readType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) tc.AddRegionStore(1, 3) @@ -359,7 +359,7 @@ func TestSpecialUseHotRegion(t *testing.T) { tc.AddLeaderRegionWithWriteInfo(3, 1, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3}) tc.AddLeaderRegionWithWriteInfo(4, 2, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{1, 3}) tc.AddLeaderRegionWithWriteInfo(5, 3, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{1, 2}) - hs, err := CreateScheduler(utils.Write.String(), oc, storage, cd) + hs, err := CreateScheduler(writeType, oc, storage, cd) re.NoError(err) for i := 0; i < 100; i++ { ops, _ = hs.Schedule(tc, false) diff --git a/pkg/schedule/type/type.go b/pkg/schedule/type/type.go index 1f6211a9783..1bfe7dcdeca 100644 --- a/pkg/schedule/type/type.go +++ b/pkg/schedule/type/type.go @@ -73,48 +73,74 @@ 
const ( LabelScheduler CheckerSchedulerType = "label-scheduler" ) -// SchedulerTypeCompatibleMap exists for compatibility. -// -// It is used in the `PersistOptions` and `PersistConfig`. These two structs -// are persisted in the storage, so we need to keep the compatibility. -var SchedulerTypeCompatibleMap = map[CheckerSchedulerType]string{ - BalanceLeaderScheduler: "balance-leader", - BalanceRegionScheduler: "balance-region", - BalanceWitnessScheduler: "balance-witness", - EvictLeaderScheduler: "evict-leader", - EvictSlowStoreScheduler: "evict-slow-store", - EvictSlowTrendScheduler: "evict-slow-trend", - GrantLeaderScheduler: "grant-leader", - GrantHotRegionScheduler: "grant-hot-region", - BalanceHotRegionScheduler: "hot-region", - RandomMergeScheduler: "random-merge", - ScatterRangeScheduler: "scatter-range", - ShuffleHotRegionScheduler: "shuffle-hot-region", - ShuffleLeaderScheduler: "shuffle-leader", - ShuffleRegionScheduler: "shuffle-region", - SplitBucketScheduler: "split-bucket", - TransferWitnessLeaderScheduler: "transfer-witness-leader", - LabelScheduler: "label", -} +var ( + // SchedulerTypeCompatibleMap exists for compatibility. + // + // It is used in the `PersistOptions` and `PersistConfig`. These two structs + // are persisted in the storage, so we need to keep the compatibility. + SchedulerTypeCompatibleMap = map[CheckerSchedulerType]string{ + BalanceLeaderScheduler: "balance-leader", + BalanceRegionScheduler: "balance-region", + BalanceWitnessScheduler: "balance-witness", + EvictLeaderScheduler: "evict-leader", + EvictSlowStoreScheduler: "evict-slow-store", + EvictSlowTrendScheduler: "evict-slow-trend", + GrantLeaderScheduler: "grant-leader", + GrantHotRegionScheduler: "grant-hot-region", + BalanceHotRegionScheduler: "hot-region", + RandomMergeScheduler: "random-merge", + ScatterRangeScheduler: "scatter-range", + ShuffleHotRegionScheduler: "shuffle-hot-region", + ShuffleLeaderScheduler: "shuffle-leader", + ShuffleRegionScheduler: "shuffle-region", + SplitBucketScheduler: "split-bucket", + TransferWitnessLeaderScheduler: "transfer-witness-leader", + LabelScheduler: "label", + } -// SchedulerStr2Type is a map to convert the scheduler string to the CheckerSchedulerType. -var SchedulerStr2Type = map[string]CheckerSchedulerType{ - "balance-leader-scheduler": BalanceLeaderScheduler, - "balance-region-scheduler": BalanceRegionScheduler, - "balance-witness-scheduler": BalanceWitnessScheduler, - "evict-leader-scheduler": EvictLeaderScheduler, - "evict-slow-store-scheduler": EvictSlowStoreScheduler, - "evict-slow-trend-scheduler": EvictSlowTrendScheduler, - "grant-leader-scheduler": GrantLeaderScheduler, - "grant-hot-region-scheduler": GrantHotRegionScheduler, - "balance-hot-region-scheduler": BalanceHotRegionScheduler, - "random-merge-scheduler": RandomMergeScheduler, - // TODO: update to `scatter-range-scheduler` - "scatter-range": ScatterRangeScheduler, - "shuffle-hot-region-scheduler": ShuffleHotRegionScheduler, - "shuffle-leader-scheduler": ShuffleLeaderScheduler, - "shuffle-region-scheduler": ShuffleRegionScheduler, - "split-bucket-scheduler": SplitBucketScheduler, - "transfer-witness-leader-scheduler": TransferWitnessLeaderScheduler, - "label-scheduler": LabelScheduler, -} + // SchedulerTypeCompatibleMapReverse exists for compatibility. + // + // It is used in the `PersistOptions` and `PersistConfig`. These two structs + // are persisted in the storage, so we need to keep the compatibility. 
+ SchedulerTypeCompatibleMapReverse = map[string]CheckerSchedulerType{ + "balance-leader": BalanceLeaderScheduler, + "balance-region": BalanceRegionScheduler, + "balance-witness": BalanceWitnessScheduler, + "evict-leader": EvictLeaderScheduler, + "evict-slow-store": EvictSlowStoreScheduler, + "evict-slow-trend": EvictSlowTrendScheduler, + "grant-leader": GrantLeaderScheduler, + "grant-hot-region": GrantHotRegionScheduler, + "hot-region": BalanceHotRegionScheduler, + "random-merge": RandomMergeScheduler, + "scatter-range": ScatterRangeScheduler, + "shuffle-hot-region": ShuffleHotRegionScheduler, + "shuffle-leader": ShuffleLeaderScheduler, + "shuffle-region": ShuffleRegionScheduler, + "split-bucket": SplitBucketScheduler, + "transfer-witness-leader": TransferWitnessLeaderScheduler, + "label": LabelScheduler, + } + + // SchedulerStr2Type is a map to convert the scheduler string to the CheckerSchedulerType. + SchedulerStr2Type = map[string]CheckerSchedulerType{ + "balance-leader-scheduler": BalanceLeaderScheduler, + "balance-region-scheduler": BalanceRegionScheduler, + "balance-witness-scheduler": BalanceWitnessScheduler, + "evict-leader-scheduler": EvictLeaderScheduler, + "evict-slow-store-scheduler": EvictSlowStoreScheduler, + "evict-slow-trend-scheduler": EvictSlowTrendScheduler, + "grant-leader-scheduler": GrantLeaderScheduler, + "grant-hot-region-scheduler": GrantHotRegionScheduler, + "balance-hot-region-scheduler": BalanceHotRegionScheduler, + "random-merge-scheduler": RandomMergeScheduler, + // TODO: update to `scatter-range-scheduler` + "scatter-range": ScatterRangeScheduler, + "shuffle-hot-region-scheduler": ShuffleHotRegionScheduler, + "shuffle-leader-scheduler": ShuffleLeaderScheduler, + "shuffle-region-scheduler": ShuffleRegionScheduler, + "split-bucket-scheduler": SplitBucketScheduler, + "transfer-witness-leader-scheduler": TransferWitnessLeaderScheduler, + "label-scheduler": LabelScheduler, + } +) diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index 83d4771adc4..d6932c4c63e 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -48,7 +48,7 @@ const ( ) func init() { - schedulers.RegisterSliceDecoderBuilder(EvictLeaderType, func(args []string) schedulers.ConfigDecoder { + schedulers.RegisterSliceDecoderBuilder(UserEvictLeaderScheduler, func(args []string) schedulers.ConfigDecoder { return func(v any) error { if len(args) != 1 { return errors.New("should specify the store-id") @@ -71,7 +71,7 @@ func init() { } }) - schedulers.RegisterScheduler(EvictLeaderType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder schedulers.ConfigDecoder, _ ...func(string) error) (schedulers.Scheduler, error) { + schedulers.RegisterScheduler(UserEvictLeaderScheduler, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder schedulers.ConfigDecoder, _ ...func(string) error) (schedulers.Scheduler, error) { conf := &evictLeaderSchedulerConfig{StoreIDWitRanges: make(map[uint64][]core.KeyRange), storage: storage} if err := decoder(conf); err != nil { return nil, err diff --git a/server/handler.go b/server/handler.go index d36dd6656ae..69b43d14b31 100644 --- a/server/handler.go +++ b/server/handler.go @@ -187,7 +187,7 @@ func (h *Handler) GetAllRequestHistoryHotRegion(request *HistoryHotRegionsReques // AddScheduler adds a scheduler. 
func (h *Handler) AddScheduler(tp types.CheckerSchedulerType, args ...string) error { // TODO: remove this map in subsequent PRs, because we need to use the new type in `CreateScheduler`. - name := types.SchedulerTypeCompatibleMap[tp] + // name := types.SchedulerTypeCompatibleMap[tp] c, err := h.GetRaftCluster() if err != nil { return err } @@ -199,7 +199,7 @@ func (h *Handler) AddScheduler(tp types.CheckerSchedulerType, args ...string) er } else { removeSchedulerCb = c.GetCoordinator().GetSchedulersController().RemoveScheduler } - s, err := schedulers.CreateScheduler(name, c.GetOperatorController(), h.s.storage, schedulers.ConfigSliceDecoder(name, args), removeSchedulerCb) + s, err := schedulers.CreateScheduler(tp, c.GetOperatorController(), h.s.storage, schedulers.ConfigSliceDecoder(tp, args), removeSchedulerCb) if err != nil { return err } From d48eaa0c58f559d3263f6fa5e3d1265cef607975 Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Mon, 5 Aug 2024 10:32:12 +0800 Subject: [PATCH 02/12] fix static Signed-off-by: okJiang <819421878@qq.com> --- pkg/schedule/schedulers/hot_region_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 5eb73eaf4a9..120a20c9219 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -43,7 +43,7 @@ import ( var ( writeType = types.CheckerSchedulerType(utils.Write.String()) - readType = types.CheckerSchedulerType(utils.Read.String()) + readType = types.CheckerSchedulerType(utils.Read.String()) ) func init() { From 915e6ba2ef9777804baa7b7850e037bc2f6fd2b4 Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Mon, 5 Aug 2024 11:34:09 +0800 Subject: [PATCH 03/12] remove type Signed-off-by: okJiang <819421878@qq.com> --- pkg/schedule/config/config.go | 9 ++-- .../schedulers/balance_benchmark_test.go | 14 +++--- pkg/schedule/schedulers/balance_leader.go | 4 +- pkg/schedule/schedulers/balance_region.go | 4 +- pkg/schedule/schedulers/balance_test.go | 49 ++++++++++--------- pkg/schedule/schedulers/balance_witness.go | 4 +- .../schedulers/balance_witness_test.go | 3 +- pkg/schedule/schedulers/evict_leader.go | 2 - pkg/schedule/schedulers/evict_leader_test.go | 7 +-- pkg/schedule/schedulers/evict_slow_store.go | 2 - .../schedulers/evict_slow_store_test.go | 4 +- pkg/schedule/schedulers/evict_slow_trend.go | 2 - .../schedulers/evict_slow_trend_test.go | 4 +- pkg/schedule/schedulers/grant_hot_region.go | 6 +-- pkg/schedule/schedulers/grant_leader.go | 4 +- pkg/schedule/schedulers/hot_region.go | 2 - pkg/schedule/schedulers/hot_region_test.go | 30 ++++++------ pkg/schedule/schedulers/label.go | 2 - pkg/schedule/schedulers/random_merge.go | 4 +- pkg/schedule/schedulers/scatter_range.go | 2 - pkg/schedule/schedulers/scheduler_test.go | 27 +++++----- pkg/schedule/schedulers/shuffle_hot_region.go | 2 - pkg/schedule/schedulers/shuffle_leader.go | 4 +- pkg/schedule/schedulers/shuffle_region.go | 4 +- pkg/schedule/schedulers/split_bucket.go | 4 +- .../schedulers/transfer_witness_leader.go | 2 - .../transfer_witness_leader_test.go | 5 +- plugin/scheduler_example/evict_leader.go | 2 +- server/cluster/cluster_test.go | 35 +++++++------ tests/server/api/scheduler_test.go | 6 ++- 30 files changed, 114 insertions(+), 135 deletions(-) diff --git a/pkg/schedule/config/config.go b/pkg/schedule/config/config.go index 5a67a547483..f8188db5eff 100644 --- a/pkg/schedule/config/config.go +++
b/pkg/schedule/config/config.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/core/storelimit" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/utils/configutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/typeutil" @@ -568,10 +569,10 @@ type SchedulerConfig struct { // If these schedulers are not in the persistent configuration, they // will be created automatically when reloading. var DefaultSchedulers = SchedulerConfigs{ - {Type: "balance-region"}, - {Type: "balance-leader"}, - {Type: "hot-region"}, - {Type: "evict-slow-store"}, + {Type: types.SchedulerTypeCompatibleMap[types.BalanceRegionScheduler]}, + {Type: types.SchedulerTypeCompatibleMap[types.BalanceLeaderScheduler]}, + {Type: types.SchedulerTypeCompatibleMap[types.BalanceHotRegionScheduler]}, + {Type: types.SchedulerTypeCompatibleMap[types.EvictSlowStoreScheduler]}, } // IsDefaultScheduler checks whether the scheduler is enabled by default. diff --git a/pkg/schedule/schedulers/balance_benchmark_test.go b/pkg/schedule/schedulers/balance_benchmark_test.go index 2d7befd27af..4fb6a4fb781 100644 --- a/pkg/schedule/schedulers/balance_benchmark_test.go +++ b/pkg/schedule/schedulers/balance_benchmark_test.go @@ -155,7 +155,7 @@ func BenchmarkPlacementRule(b *testing.B) { re := assert.New(b) cancel, tc, oc := newBenchCluster(true, true, false) defer cancel() - sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}, []BalanceRegionCreateOption{WithBalanceRegionName(BalanceRegionType)}...) + sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}) b.ResetTimer() var ops []*operator.Operator var plans []plan.Plan @@ -171,7 +171,7 @@ func BenchmarkPlacementRule(b *testing.B) { func BenchmarkLabel(b *testing.B) { cancel, tc, oc := newBenchCluster(false, true, false) defer cancel() - sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}, []BalanceRegionCreateOption{WithBalanceRegionName(BalanceRegionType)}...) + sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}) b.ResetTimer() for i := 0; i < b.N; i++ { sc.Schedule(tc, false) @@ -181,7 +181,7 @@ func BenchmarkLabel(b *testing.B) { func BenchmarkNoLabel(b *testing.B) { cancel, tc, oc := newBenchCluster(false, false, false) defer cancel() - sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}, []BalanceRegionCreateOption{WithBalanceRegionName(BalanceRegionType)}...) + sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}) b.ResetTimer() for i := 0; i < b.N; i++ { sc.Schedule(tc, false) @@ -191,7 +191,7 @@ func BenchmarkNoLabel(b *testing.B) { func BenchmarkDiagnosticNoLabel1(b *testing.B) { cancel, tc, oc := newBenchCluster(false, false, false) defer cancel() - sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}, []BalanceRegionCreateOption{WithBalanceRegionName(BalanceRegionType)}...) + sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}) b.ResetTimer() for i := 0; i < b.N; i++ { sc.Schedule(tc, true) @@ -201,7 +201,7 @@ func BenchmarkDiagnosticNoLabel1(b *testing.B) { func BenchmarkDiagnosticNoLabel2(b *testing.B) { cancel, tc, oc := newBenchBigCluster(100, 100) defer cancel() - sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}, []BalanceRegionCreateOption{WithBalanceRegionName(BalanceRegionType)}...) 
+ sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}) b.ResetTimer() for i := 0; i < b.N; i++ { sc.Schedule(tc, true) @@ -211,7 +211,7 @@ func BenchmarkDiagnosticNoLabel2(b *testing.B) { func BenchmarkNoLabel2(b *testing.B) { cancel, tc, oc := newBenchBigCluster(100, 100) defer cancel() - sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}, []BalanceRegionCreateOption{WithBalanceRegionName(BalanceRegionType)}...) + sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}) b.ResetTimer() for i := 0; i < b.N; i++ { sc.Schedule(tc, false) @@ -221,7 +221,7 @@ func BenchmarkNoLabel2(b *testing.B) { func BenchmarkTombStore(b *testing.B) { cancel, tc, oc := newBenchCluster(false, false, true) defer cancel() - sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}, []BalanceRegionCreateOption{WithBalanceRegionName(BalanceRegionType)}...) + sc := newBalanceRegionScheduler(oc, &balanceRegionSchedulerConfig{}) b.ResetTimer() for i := 0; i < b.N; i++ { sc.Schedule(tc, false) diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 6762c8751e4..e91540f681a 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -43,8 +43,6 @@ import ( const ( // BalanceLeaderName is balance leader scheduler name. BalanceLeaderName = "balance-leader-scheduler" - // BalanceLeaderType is balance leader scheduler type. - BalanceLeaderType = "balance-leader" // BalanceLeaderBatchSize is the default number of operators to transfer leaders by one scheduling. // Default value is 4 which is subjected by scheduler-max-waiting-operator and leader-schedule-limit // If you want to increase balance speed more, please increase above-mentioned param. @@ -536,7 +534,7 @@ func (l *balanceLeaderScheduler) createOperator(solver *solver, collector *plan. } solver.Step++ defer func() { solver.Step-- }() - op, err := operator.CreateTransferLeaderOperator(BalanceLeaderType, solver, solver.Region, solver.TargetStoreID(), []uint64{}, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator(l.GetName(), solver, solver.Region, solver.TargetStoreID(), []uint64{}, operator.OpLeader) if err != nil { log.Debug("fail to create balance leader operator", errs.ZapError(err)) if collector != nil { diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 3ef01345aea..32e699cd24f 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -33,8 +33,6 @@ import ( const ( // BalanceRegionName is balance region scheduler name. BalanceRegionName = "balance-region-scheduler" - // BalanceRegionType is balance region scheduler type. 
- BalanceRegionType = "balance-region" ) type balanceRegionSchedulerConfig struct { @@ -242,7 +240,7 @@ func (s *balanceRegionScheduler) transferPeer(solver *solver, collector *plan.Co oldPeer := solver.Region.GetStorePeer(sourceID) newPeer := &metapb.Peer{StoreId: solver.Target.GetID(), Role: oldPeer.Role} solver.Step++ - op, err := operator.CreateMovePeerOperator(BalanceRegionType, solver, solver.Region, operator.OpRegion, oldPeer.GetStoreId(), newPeer) + op, err := operator.CreateMovePeerOperator(s.GetName(), solver, solver.Region, operator.OpRegion, oldPeer.GetStoreId(), newPeer) if err != nil { balanceRegionCreateOpFailCounter.Inc() if collector != nil { diff --git a/pkg/schedule/schedulers/balance_test.go b/pkg/schedule/schedulers/balance_test.go index 26214ed5456..1969717235a 100644 --- a/pkg/schedule/schedulers/balance_test.go +++ b/pkg/schedule/schedulers/balance_test.go @@ -31,6 +31,7 @@ import ( "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/versioninfo" @@ -239,7 +240,7 @@ func TestBalanceLeaderSchedulerTestSuite(t *testing.T) { func (suite *balanceLeaderSchedulerTestSuite) SetupTest() { re := suite.Require() suite.cancel, suite.conf, suite.tc, suite.oc = prepareSchedulersTest() - lb, err := CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", ""})) + lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) re.NoError(err) suite.lb = lb } @@ -583,34 +584,34 @@ func (suite *balanceLeaderRangeSchedulerTestSuite) TestSingleRangeBalance() { suite.tc.UpdateStoreLeaderWeight(3, 1) suite.tc.UpdateStoreLeaderWeight(4, 2) suite.tc.AddLeaderRegionWithRange(1, "a", "g", 1, 2, 3, 4) - lb, err := CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", ""})) + lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) re.NoError(err) ops, _ := lb.Schedule(suite.tc, false) re.NotEmpty(ops) re.Len(ops, 1) re.Len(ops[0].Counters, 1) re.Len(ops[0].FinishedCounters, 1) - lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"h", "n"})) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"h", "n"})) re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) re.Empty(ops) - lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"b", "f"})) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"b", "f"})) re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) re.Empty(ops) - lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", "a"})) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, 
storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", "a"})) re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) re.Empty(ops) - lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"g", ""})) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"g", ""})) re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) re.Empty(ops) - lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", "f"})) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", "f"})) re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) re.Empty(ops) - lb, err = CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"b", ""})) + lb, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"b", ""})) re.NoError(err) ops, _ = lb.Schedule(suite.tc, false) re.Empty(ops) @@ -630,7 +631,7 @@ func (suite *balanceLeaderRangeSchedulerTestSuite) TestMultiRangeBalance() { suite.tc.UpdateStoreLeaderWeight(3, 1) suite.tc.UpdateStoreLeaderWeight(4, 2) suite.tc.AddLeaderRegionWithRange(1, "a", "g", 1, 2, 3, 4) - lb, err := CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", "g", "o", "t"})) + lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", "g", "o", "t"})) re.NoError(err) ops, _ := lb.Schedule(suite.tc, false) re.Equal(uint64(1), ops[0].RegionID()) @@ -669,7 +670,7 @@ func (suite *balanceLeaderRangeSchedulerTestSuite) TestBatchBalance() { suite.tc.AddLeaderRegionWithRange(uint64(102), "102a", "102z", 1, 2, 3) suite.tc.AddLeaderRegionWithRange(uint64(103), "103a", "103z", 4, 5, 6) - lb, err := CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", ""})) + lb, err := CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) re.NoError(err) ops, _ := lb.Schedule(suite.tc, false) re.Len(ops, 2) @@ -761,7 +762,7 @@ func checkBalanceRegionSchedule1(re *require.Assertions, enablePlacementRules bo tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetEnablePlacementRules(enablePlacementRules) tc.SetMaxReplicasWithLabel(enablePlacementRules, 1) - sb, err := CreateScheduler(BalanceRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceRegionType, []string{"", ""})) + sb, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) // Add stores 1,2,3,4. 
tc.AddRegionStore(1, 6) @@ -816,7 +817,7 @@ func checkReplica3(re *require.Assertions, enablePlacementRules bool) { tc.SetEnablePlacementRules(enablePlacementRules) tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) - sb, err := CreateScheduler(BalanceRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceRegionType, []string{"", ""})) + sb, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) // Store 1 has the largest region score, so the balance scheduler tries to replace peer in store 1. tc.AddLabelsStore(1, 16, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) @@ -890,7 +891,7 @@ func checkReplica5(re *require.Assertions, enablePlacementRules bool) { tc.SetEnablePlacementRules(enablePlacementRules) tc.SetMaxReplicasWithLabel(enablePlacementRules, 5) - sb, err := CreateScheduler(BalanceRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceRegionType, []string{"", ""})) + sb, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) tc.AddLabelsStore(1, 4, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) tc.AddLabelsStore(2, 5, map[string]string{"zone": "z2", "rack": "r1", "host": "h1"}) @@ -991,7 +992,7 @@ func checkBalanceRegionSchedule2(re *require.Assertions, enablePlacementRules bo core.SetApproximateKeys(200), ) - sb, err := CreateScheduler(BalanceRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceRegionType, []string{"", ""})) + sb, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) tc.AddRegionStore(1, 11) @@ -1047,7 +1048,7 @@ func checkBalanceRegionStoreWeight(re *require.Assertions, enablePlacementRules tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetEnablePlacementRules(enablePlacementRules) tc.SetMaxReplicasWithLabel(enablePlacementRules, 1) - sb, err := CreateScheduler(BalanceRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceRegionType, []string{"", ""})) + sb, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) tc.AddRegionStore(1, 10) @@ -1082,7 +1083,7 @@ func checkBalanceRegionOpInfluence(re *require.Assertions, enablePlacementRules tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetEnablePlacementRules(enablePlacementRules) tc.SetMaxReplicasWithLabel(enablePlacementRules, 1) - sb, err := CreateScheduler(BalanceRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceRegionType, []string{"", ""})) + sb, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) // Add stores 1,2,3,4. 
tc.AddRegionStoreWithLeader(1, 2) @@ -1118,7 +1119,7 @@ func checkReplacePendingRegion(re *require.Assertions, enablePlacementRules bool tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetEnablePlacementRules(enablePlacementRules) tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) - sb, err := CreateScheduler(BalanceRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceRegionType, []string{"", ""})) + sb, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) // Store 1 has the largest region score, so the balance scheduler try to replace peer in store 1. tc.AddLabelsStore(1, 16, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) @@ -1148,7 +1149,7 @@ func TestBalanceRegionShouldNotBalance(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - sb, err := CreateScheduler(BalanceRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceRegionType, []string{"", ""})) + sb, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) region := tc.MockRegionInfo(1, 0, []uint64{2, 3, 4}, nil, nil) tc.PutRegion(region) @@ -1161,7 +1162,7 @@ func TestBalanceRegionEmptyRegion(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - sb, err := CreateScheduler(BalanceRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceRegionType, []string{"", ""})) + sb, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) tc.AddRegionStore(1, 10) tc.AddRegionStore(2, 9) @@ -1207,7 +1208,7 @@ func checkRandomMergeSchedule(re *require.Assertions, enablePlacementRules bool) tc.SetMaxReplicasWithLabel(enablePlacementRules, 3) tc.SetMergeScheduleLimit(1) - mb, err := CreateScheduler(RandomMergeType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(RandomMergeType, []string{"", ""})) + mb, err := CreateScheduler(types.RandomMergeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.RandomMergeScheduler, []string{"", ""})) re.NoError(err) tc.AddRegionStore(1, 4) @@ -1289,7 +1290,7 @@ func checkScatterRangeBalance(re *require.Assertions, enablePlacementRules bool) tc.UpdateStoreStatus(uint64(i)) } - hb, err := CreateScheduler(ScatterRangeType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(ScatterRangeType, []string{"s_00", "s_50", "t"})) + hb, err := CreateScheduler(types.ScatterRangeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ScatterRangeScheduler, []string{"s_00", "s_50", "t"})) re.NoError(err) scheduleAndApplyOperator(tc, hb, 100) @@ -1363,7 +1364,7 @@ func checkBalanceLeaderLimit(re *require.Assertions, enablePlacementRules bool) // test not allow schedule leader tc.SetLeaderScheduleLimit(0) - hb, err := CreateScheduler(ScatterRangeType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(ScatterRangeType, []string{"s_00", "s_50", "t"})) + hb, err := CreateScheduler(types.ScatterRangeScheduler, oc, storage.NewStorageWithMemoryBackend(), 
ConfigSliceDecoder(types.ScatterRangeScheduler, []string{"s_00", "s_50", "t"})) re.NoError(err) scheduleAndApplyOperator(tc, hb, 100) @@ -1387,7 +1388,7 @@ func TestConcurrencyUpdateConfig(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(ScatterRangeType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(ScatterRangeType, []string{"s_00", "s_50", "t"})) + hb, err := CreateScheduler(types.ScatterRangeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ScatterRangeScheduler, []string{"s_00", "s_50", "t"})) sche := hb.(*scatterRangeScheduler) re.NoError(err) ch := make(chan struct{}) @@ -1460,7 +1461,7 @@ func TestBalanceWhenRegionNotHeartbeat(t *testing.T) { tc.UpdateStoreStatus(uint64(i)) } - hb, err := CreateScheduler(ScatterRangeType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(ScatterRangeType, []string{"s_00", "s_09", "t"})) + hb, err := CreateScheduler(types.ScatterRangeScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ScatterRangeScheduler, []string{"s_00", "s_09", "t"})) re.NoError(err) scheduleAndApplyOperator(tc, hb, 100) diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index 319a0f2493a..0a856883ebc 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -43,8 +43,6 @@ import ( const ( // BalanceWitnessName is balance witness scheduler name. BalanceWitnessName = "balance-witness-scheduler" - // BalanceWitnessType is balance witness scheduler type. - BalanceWitnessType = "balance-witness" // balanceWitnessBatchSize is the default number of operators to transfer witnesses by one scheduling. // Default value is 4 which is subjected by scheduler-max-waiting-operator and witness-schedule-limit // If you want to increase balance speed more, please increase above-mentioned param. 
@@ -352,7 +350,7 @@ func (b *balanceWitnessScheduler) createOperator(solver *solver, collector *plan } solver.Step++ defer func() { solver.Step-- }() - op, err := operator.CreateMoveWitnessOperator(BalanceWitnessType, solver, solver.Region, solver.SourceStoreID(), solver.TargetStoreID()) + op, err := operator.CreateMoveWitnessOperator(b.GetName(), solver, solver.Region, solver.SourceStoreID(), solver.TargetStoreID()) if err != nil { log.Debug("fail to create balance witness operator", errs.ZapError(err)) return nil diff --git a/pkg/schedule/schedulers/balance_witness_test.go b/pkg/schedule/schedulers/balance_witness_test.go index 2b6723a5172..d8715f71784 100644 --- a/pkg/schedule/schedulers/balance_witness_test.go +++ b/pkg/schedule/schedulers/balance_witness_test.go @@ -23,6 +23,7 @@ import ( "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage" ) @@ -50,7 +51,7 @@ func (suite *balanceWitnessSchedulerTestSuite) SetupTest() { Count: 4, }, }) - lb, err := CreateScheduler(BalanceWitnessType, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceWitnessType, []string{"", ""}), nil) + lb, err := CreateScheduler(types.BalanceWitnessScheduler, suite.oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceWitnessScheduler, []string{"", ""}), nil) re.NoError(err) suite.lb = lb } diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 3aba9a5d184..710e1698a64 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -40,8 +40,6 @@ import ( const ( // EvictLeaderName is evict leader scheduler name. EvictLeaderName = "evict-leader-scheduler" - // EvictLeaderType is evict leader scheduler type. 
- EvictLeaderType = "evict-leader" // EvictLeaderBatchSize is the number of operators to transfer // leaders by one scheduling EvictLeaderBatchSize = 3 diff --git a/pkg/schedule/schedulers/evict_leader_test.go b/pkg/schedule/schedulers/evict_leader_test.go index 63f7cde3b15..eb97be516d7 100644 --- a/pkg/schedule/schedulers/evict_leader_test.go +++ b/pkg/schedule/schedulers/evict_leader_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/operator" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" ) @@ -41,7 +42,7 @@ func TestEvictLeader(t *testing.T) { tc.AddLeaderRegion(2, 2, 1) tc.AddLeaderRegion(3, 3, 1) - sl, err := CreateScheduler(EvictLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(EvictLeaderType, []string{"1"}), func(string) error { return nil }) + sl, err := CreateScheduler(types.EvictLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.EvictLeaderScheduler, []string{"1"}), func(string) error { return nil }) re.NoError(err) re.True(sl.IsScheduleAllowed(tc)) ops, _ := sl.Schedule(tc, false) @@ -54,7 +55,7 @@ func TestEvictLeaderWithUnhealthyPeer(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sl, err := CreateScheduler(EvictLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(EvictLeaderType, []string{"1"}), func(string) error { return nil }) + sl, err := CreateScheduler(types.EvictLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.EvictLeaderScheduler, []string{"1"}), func(string) error { return nil }) re.NoError(err) // Add stores 1, 2, 3 @@ -120,7 +121,7 @@ func TestBatchEvict(t *testing.T) { tc.AddLeaderRegion(6, 2, 1, 3) tc.AddLeaderRegion(7, 3, 1, 2) - sl, err := CreateScheduler(EvictLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(EvictLeaderType, []string{"1"}), func(string) error { return nil }) + sl, err := CreateScheduler(types.EvictLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.EvictLeaderScheduler, []string{"1"}), func(string) error { return nil }) re.NoError(err) re.True(sl.IsScheduleAllowed(tc)) ops, _ := sl.Schedule(tc, false) diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index 721444d1da7..17cc986cf37 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -37,8 +37,6 @@ import ( const ( // EvictSlowStoreName is evict leader scheduler name. EvictSlowStoreName = "evict-slow-store-scheduler" - // EvictSlowStoreType is evict leader scheduler type. 
- EvictSlowStoreType = "evict-slow-store" slowStoreEvictThreshold = 100 slowStoreRecoverThreshold = 1 diff --git a/pkg/schedule/schedulers/evict_slow_store_test.go b/pkg/schedule/schedulers/evict_slow_store_test.go index 440ab85d08e..ad5b16e8ca3 100644 --- a/pkg/schedule/schedulers/evict_slow_store_test.go +++ b/pkg/schedule/schedulers/evict_slow_store_test.go @@ -58,9 +58,9 @@ func (suite *evictSlowStoreTestSuite) SetupTest() { storage := storage.NewStorageWithMemoryBackend() var err error - suite.es, err = CreateScheduler(EvictSlowStoreType, suite.oc, storage, ConfigSliceDecoder(EvictSlowStoreType, []string{}), nil) + suite.es, err = CreateScheduler(types.EvictSlowStoreScheduler, suite.oc, storage, ConfigSliceDecoder(types.EvictSlowStoreScheduler, []string{}), nil) re.NoError(err) - suite.bs, err = CreateScheduler(BalanceLeaderType, suite.oc, storage, ConfigSliceDecoder(BalanceLeaderType, []string{}), nil) + suite.bs, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage, ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{}), nil) re.NoError(err) } diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index d14cec1e06a..a9ee5d7bde8 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -38,8 +38,6 @@ import ( const ( // EvictSlowTrendName is evict leader by slow trend scheduler name. EvictSlowTrendName = "evict-slow-trend-scheduler" - // EvictSlowTrendType is evict leader by slow trend scheduler type. - EvictSlowTrendType = "evict-slow-trend" ) const ( diff --git a/pkg/schedule/schedulers/evict_slow_trend_test.go b/pkg/schedule/schedulers/evict_slow_trend_test.go index c01ae4959ba..10da5c91565 100644 --- a/pkg/schedule/schedulers/evict_slow_trend_test.go +++ b/pkg/schedule/schedulers/evict_slow_trend_test.go @@ -72,9 +72,9 @@ func (suite *evictSlowTrendTestSuite) SetupTest() { storage := storage.NewStorageWithMemoryBackend() var err error - suite.es, err = CreateScheduler(EvictSlowTrendType, suite.oc, storage, ConfigSliceDecoder(EvictSlowTrendType, []string{})) + suite.es, err = CreateScheduler(types.EvictSlowTrendScheduler, suite.oc, storage, ConfigSliceDecoder(types.EvictSlowTrendScheduler, []string{})) re.NoError(err) - suite.bs, err = CreateScheduler(BalanceLeaderType, suite.oc, storage, ConfigSliceDecoder(BalanceLeaderType, []string{})) + suite.bs, err = CreateScheduler(types.BalanceLeaderScheduler, suite.oc, storage, ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{})) re.NoError(err) } diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index 4289effd7bd..2ce4a8a922f 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -44,8 +44,6 @@ import ( const ( // GrantHotRegionName is grant hot region scheduler name. GrantHotRegionName = "grant-hot-region-scheduler" - // GrantHotRegionType is grant hot region scheduler type. 
- GrantHotRegionType = "grant-hot-region" ) type grantHotRegionSchedulerConfig struct { @@ -335,9 +333,9 @@ func (s *grantHotRegionScheduler) transfer(cluster sche.SchedulerCluster, region dstStore := &metapb.Peer{StoreId: destStoreIDs[i]} if isLeader { - op, err = operator.CreateTransferLeaderOperator(GrantHotRegionType+"-leader", cluster, srcRegion, dstStore.StoreId, []uint64{}, operator.OpLeader) + op, err = operator.CreateTransferLeaderOperator(s.GetName()+"-leader", cluster, srcRegion, dstStore.StoreId, []uint64{}, operator.OpLeader) } else { - op, err = operator.CreateMovePeerOperator(GrantHotRegionType+"-move", cluster, srcRegion, operator.OpRegion|operator.OpLeader, srcStore.GetID(), dstStore) + op, err = operator.CreateMovePeerOperator(s.GetName()+"-move", cluster, srcRegion, operator.OpRegion|operator.OpLeader, srcStore.GetID(), dstStore) } op.SetPriorityLevel(constant.High) return diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 41e6debaafa..6247a1ccebe 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -39,8 +39,6 @@ import ( const ( // GrantLeaderName is grant leader scheduler name. GrantLeaderName = "grant-leader-scheduler" - // GrantLeaderType is grant leader scheduler type. - GrantLeaderType = "grant-leader" ) type grantLeaderSchedulerConfig struct { @@ -232,7 +230,7 @@ func (s *grantLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ( continue } - op, err := operator.CreateForceTransferLeaderOperator(GrantLeaderType, cluster, region, id, operator.OpLeader) + op, err := operator.CreateForceTransferLeaderOperator(s.GetName(), cluster, region, id, operator.OpLeader) if err != nil { log.Debug("fail to create grant leader operator", errs.ZapError(err)) continue diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index fe9b3964139..00a8250d43c 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -47,8 +47,6 @@ import ( const ( // HotRegionName is balance hot region scheduler name. HotRegionName = "balance-hot-region-scheduler" - // HotRegionType is balance hot region scheduler type. 
- HotRegionType = "hot-region" splitHotReadBuckets = "split-hot-read-region" splitHotWriteBuckets = "split-hot-write-region" splitProgressiveRank = 5 diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 120a20c9219..309938cd51b 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -87,7 +87,7 @@ func TestUpgrade(t *testing.T) { cancel, _, _, oc := prepareSchedulersTest() defer cancel() // new - sche, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(HotRegionType, nil)) + sche, err := CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceHotRegionScheduler, nil)) re.NoError(err) hb := sche.(*hotScheduler) re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetReadPriorities()) @@ -95,7 +95,7 @@ func TestUpgrade(t *testing.T) { re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetWritePeerPriorities()) re.Equal("v2", hb.conf.GetRankFormulaVersion()) // upgrade from json(null) - sche, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) + sche, err = CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) re.NoError(err) hb = sche.(*hotScheduler) re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetReadPriorities()) @@ -104,7 +104,7 @@ func TestUpgrade(t *testing.T) { re.Equal("v2", hb.conf.GetRankFormulaVersion()) // upgrade from < 5.2 config51 := `{"min-hot-byte-rate":100,"min-hot-key-rate":10,"min-hot-query-rate":10,"max-zombie-rounds":5,"max-peer-number":1000,"byte-rate-rank-step-ratio":0.05,"key-rate-rank-step-ratio":0.05,"query-rate-rank-step-ratio":0.05,"count-rank-step-ratio":0.01,"great-dec-ratio":0.95,"minor-dec-ratio":0.99,"src-tolerance-ratio":1.05,"dst-tolerance-ratio":1.05,"strict-picking-store":"true","enable-for-tiflash":"true"}` - sche, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte(config51))) + sche, err = CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte(config51))) re.NoError(err) hb = sche.(*hotScheduler) re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetReadPriorities()) @@ -113,7 +113,7 @@ func TestUpgrade(t *testing.T) { re.Equal("v1", hb.conf.GetRankFormulaVersion()) // upgrade from < 6.4 config54 := `{"min-hot-byte-rate":100,"min-hot-key-rate":10,"min-hot-query-rate":10,"max-zombie-rounds":5,"max-peer-number":1000,"byte-rate-rank-step-ratio":0.05,"key-rate-rank-step-ratio":0.05,"query-rate-rank-step-ratio":0.05,"count-rank-step-ratio":0.01,"great-dec-ratio":0.95,"minor-dec-ratio":0.99,"src-tolerance-ratio":1.05,"dst-tolerance-ratio":1.05,"read-priorities":["query","byte"],"write-leader-priorities":["query","byte"],"write-peer-priorities":["byte","key"],"strict-picking-store":"true","enable-for-tiflash":"true","forbid-rw-type":"none"}` - sche, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte(config54))) + sche, err = CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte(config54))) re.NoError(err) hb = sche.(*hotScheduler) re.Equal([]string{utils.QueryPriority, utils.BytePriority}, 
hb.conf.GetReadPriorities()) @@ -139,7 +139,7 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) { tc.PutStoreWithLabels(id) } - sche, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) + sche, err := CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) re.NoError(err) hb := sche.(*hotScheduler) @@ -1883,7 +1883,7 @@ func checkHotCacheCheckRegionFlow(re *require.Assertions, testCase testHotCacheC tc.SetEnablePlacementRules(enablePlacementRules) labels := []string{"zone", "host"} tc.SetMaxReplicasWithLabel(enablePlacementRules, 3, labels...) - sche, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) + sche, err := CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) re.NoError(err) hb := sche.(*hotScheduler) heartbeat := tc.AddLeaderRegionWithWriteInfo @@ -1991,7 +1991,7 @@ func TestHotCacheSortHotPeer(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sche, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) + sche, err := CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) re.NoError(err) hb := sche.(*hotScheduler) leaderSolver := newBalanceSolver(hb, tc, utils.Read, transferLeader) @@ -2430,7 +2430,7 @@ func TestCompatibilityConfig(t *testing.T) { defer cancel() // From new or 3.x cluster, it will use new config - hb, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder("hot-region", nil)) + hb, err := CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceHotRegionScheduler, nil)) re.NoError(err) checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ {utils.QueryDim, utils.ByteDim}, @@ -2439,8 +2439,8 @@ func TestCompatibilityConfig(t *testing.T) { }) // Config file is not currently supported - hb, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), - ConfigSliceDecoder("hot-region", []string{"read-priorities=byte,query"})) + hb, err = CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), + ConfigSliceDecoder(types.BalanceHotRegionScheduler, []string{"read-priorities=byte,query"})) re.NoError(err) checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ {utils.QueryDim, utils.ByteDim}, @@ -2467,7 +2467,7 @@ func TestCompatibilityConfig(t *testing.T) { re.NoError(err) err = storage.SaveSchedulerConfig(HotRegionName, data) re.NoError(err) - hb, err = CreateScheduler(HotRegionType, oc, storage, ConfigJSONDecoder(data)) + hb, err = CreateScheduler(types.BalanceHotRegionScheduler, oc, storage, ConfigJSONDecoder(data)) re.NoError(err) checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ {utils.ByteDim, utils.KeyDim}, @@ -2483,7 +2483,7 @@ func TestCompatibilityConfig(t *testing.T) { re.NoError(err) err = storage.SaveSchedulerConfig(HotRegionName, data) re.NoError(err) - hb, err = CreateScheduler(HotRegionType, oc, storage, ConfigJSONDecoder(data)) + hb, err = CreateScheduler(types.BalanceHotRegionScheduler, oc, storage, ConfigJSONDecoder(data)) re.NoError(err) checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ {utils.KeyDim, 
utils.QueryDim}, @@ -2597,7 +2597,7 @@ func TestMaxZombieDuration(t *testing.T) { re := require.New(t) cancel, _, _, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder("hot-region", nil)) + hb, err := CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceHotRegionScheduler, nil)) re.NoError(err) maxZombieDur := hb.(*hotScheduler).conf.getValidConf().MaxZombieRounds testCases := []maxZombieDurTestCase{ @@ -2650,7 +2650,7 @@ func TestExpect(t *testing.T) { re := require.New(t) cancel, _, _, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder("hot-region", nil)) + hb, err := CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceHotRegionScheduler, nil)) re.NoError(err) testCases := []struct { rankVersion string @@ -2958,7 +2958,7 @@ func TestEncodeConfig(t *testing.T) { re := require.New(t) cancel, _, _, oc := prepareSchedulersTest() defer cancel() - sche, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) + sche, err := CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) re.NoError(err) data, err := sche.EncodeConfig() re.NoError(err) diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index 814f525a76c..16478463212 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -31,8 +31,6 @@ import ( const ( // LabelName is label scheduler name. LabelName = "label-scheduler" - // LabelType is label scheduler type. - LabelType = "label" ) type labelSchedulerConfig struct { diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index 2d425746cea..6474e1f2f5c 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -32,8 +32,6 @@ import ( const ( // RandomMergeName is random merge scheduler name. RandomMergeName = "random-merge-scheduler" - // RandomMergeType is random merge scheduler type. 
- RandomMergeType = "random-merge" ) type randomMergeSchedulerConfig struct { @@ -100,7 +98,7 @@ func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ( return nil, nil } - ops, err := operator.CreateMergeRegionOperator(RandomMergeType, cluster, region, target, operator.OpMerge) + ops, err := operator.CreateMergeRegionOperator(s.GetName(), cluster, region, target, operator.OpMerge) if err != nil { log.Debug("fail to create merge region operator", errs.ZapError(err)) return nil, nil diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index 8874eb19cff..def62098fd0 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -33,8 +33,6 @@ import ( ) const ( - // ScatterRangeType is scatter range scheduler type - ScatterRangeType = "scatter-range" // ScatterRangeName is scatter range scheduler name ScatterRangeName = "scatter-range" ) diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index e6e8d192a24..8dfe9f3616f 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -28,6 +28,7 @@ import ( "github.com/tikv/pd/pkg/schedule/hbstream" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" @@ -63,7 +64,7 @@ func TestShuffleLeader(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sl, err := CreateScheduler(ShuffleLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(ShuffleLeaderType, []string{"", ""})) + sl, err := CreateScheduler(types.ShuffleLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ShuffleLeaderScheduler, []string{"", ""})) re.NoError(err) ops, _ := sl.Schedule(tc, false) re.Empty(ops) @@ -101,7 +102,7 @@ func TestRejectLeader(t *testing.T) { tc.AddLeaderRegion(2, 2, 1, 3) // The label scheduler transfers leader out of store1. - sl, err := CreateScheduler(LabelType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(LabelType, []string{"", ""})) + sl, err := CreateScheduler(types.LabelScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.LabelScheduler, []string{"", ""})) re.NoError(err) ops, _ := sl.Schedule(tc, false) operatorutil.CheckTransferLeaderFrom(re, ops[0], operator.OpLeader, 1) @@ -113,13 +114,13 @@ func TestRejectLeader(t *testing.T) { // As store3 is disconnected, store1 rejects leader. Balancer will not create // any operators. - bs, err := CreateScheduler(BalanceLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", ""})) + bs, err := CreateScheduler(types.BalanceLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) re.NoError(err) ops, _ = bs.Schedule(tc, false) re.Empty(ops) // Can't evict leader from store2, neither. 
- el, err := CreateScheduler(EvictLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(EvictLeaderType, []string{"2"}), func(string) error { return nil }) + el, err := CreateScheduler(types.EvictLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.EvictLeaderScheduler, []string{"2"}), func(string) error { return nil }) re.NoError(err) ops, _ = el.Schedule(tc, false) re.Empty(ops) @@ -145,7 +146,7 @@ func TestRemoveRejectLeader(t *testing.T) { defer cancel() tc.AddRegionStore(1, 0) tc.AddRegionStore(2, 1) - el, err := CreateScheduler(EvictLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(EvictLeaderType, []string{"1"}), func(string) error { return nil }) + el, err := CreateScheduler(types.EvictLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.EvictLeaderScheduler, []string{"1"}), func(string) error { return nil }) re.NoError(err) tc.DeleteStore(tc.GetStore(1)) _, err = el.(*evictLeaderScheduler).conf.removeStoreLocked(1) @@ -165,7 +166,7 @@ func checkBalance(re *require.Assertions, enablePlacementRules bool) { tc.SetEnablePlacementRules(enablePlacementRules) labels := []string{"zone", "host"} tc.SetMaxReplicasWithLabel(enablePlacementRules, 3, labels...) - hb, err := CreateScheduler(ShuffleHotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder("shuffle-hot-region", []string{"", ""})) + hb, err := CreateScheduler(types.ShuffleHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ShuffleHotRegionScheduler, []string{"", ""})) re.NoError(err) // Add stores 1, 2, 3, 4, 5, 6 with hot peer counts 3, 2, 2, 2, 0, 0. tc.AddLabelsStore(1, 3, map[string]string{"zone": "z1", "host": "h1"}) @@ -237,7 +238,7 @@ func TestShuffleRegion(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sl, err := CreateScheduler(ShuffleRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(ShuffleRegionType, []string{"", ""})) + sl, err := CreateScheduler(types.ShuffleRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ShuffleRegionScheduler, []string{"", ""})) re.NoError(err) re.True(sl.IsScheduleAllowed(tc)) ops, _ := sl.Schedule(tc, false) @@ -301,7 +302,7 @@ func TestShuffleRegionRole(t *testing.T) { }, peers[0]) tc.PutRegion(region) - sl, err := CreateScheduler(ShuffleRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(ShuffleRegionType, []string{"", ""})) + sl, err := CreateScheduler(types.ShuffleRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.ShuffleRegionScheduler, []string{"", ""})) re.NoError(err) conf := sl.(*shuffleRegionScheduler).conf @@ -321,8 +322,8 @@ func TestSpecialUseHotRegion(t *testing.T) { defer cancel() storage := storage.NewStorageWithMemoryBackend() - cd := ConfigSliceDecoder(BalanceRegionType, []string{"", ""}) - bs, err := CreateScheduler(BalanceRegionType, oc, storage, cd) + cd := ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""}) + bs, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage, cd) re.NoError(err) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) @@ -376,8 +377,8 @@ func TestSpecialUseReserved(t *testing.T) { defer cancel() storage := storage.NewStorageWithMemoryBackend() - cd := ConfigSliceDecoder(BalanceRegionType, []string{"", ""}) - bs, err := CreateScheduler(BalanceRegionType, oc, storage, cd) + cd := 
ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""}) + bs, err := CreateScheduler(types.BalanceRegionScheduler, oc, storage, cd) re.NoError(err) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) @@ -410,7 +411,7 @@ func TestBalanceLeaderWithConflictRule(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetEnablePlacementRules(true) - lb, err := CreateScheduler(BalanceLeaderType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(BalanceLeaderType, []string{"", ""})) + lb, err := CreateScheduler(types.BalanceLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) re.NoError(err) tc.AddLeaderStore(1, 1) diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index 32384a19df1..0b9e9f2a872 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -38,8 +38,6 @@ import ( const ( // ShuffleHotRegionName is shuffle hot region scheduler name. ShuffleHotRegionName = "shuffle-hot-region-scheduler" - // ShuffleHotRegionType is shuffle hot region scheduler type. - ShuffleHotRegionType = "shuffle-hot-region" ) type shuffleHotRegionSchedulerConfig struct { diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index ce2c8cd31d5..46e2f0b4db2 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -29,8 +29,6 @@ import ( const ( // ShuffleLeaderName is shuffle leader scheduler name. ShuffleLeaderName = "shuffle-leader-scheduler" - // ShuffleLeaderType is shuffle leader scheduler type. - ShuffleLeaderType = "shuffle-leader" ) type shuffleLeaderSchedulerConfig struct { @@ -91,7 +89,7 @@ func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) shuffleLeaderNoFollowerCounter.Inc() return nil, nil } - op, err := operator.CreateTransferLeaderOperator(ShuffleLeaderType, cluster, region, targetStore.GetID(), []uint64{}, operator.OpAdmin) + op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, targetStore.GetID(), []uint64{}, operator.OpAdmin) if err != nil { log.Debug("fail to create shuffle leader operator", errs.ZapError(err)) return nil, nil diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index b59e97b2a11..85a4573e55e 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -30,8 +30,6 @@ import ( const ( // ShuffleRegionName is shuffle region scheduler name. ShuffleRegionName = "shuffle-region-scheduler" - // ShuffleRegionType is shuffle region scheduler type. 
- ShuffleRegionType = "shuffle-region" ) type shuffleRegionScheduler struct { @@ -104,7 +102,7 @@ func (s *shuffleRegionScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) return nil, nil } - op, err := operator.CreateMovePeerOperator(ShuffleRegionType, cluster, region, operator.OpRegion, oldPeer.GetStoreId(), newPeer) + op, err := operator.CreateMovePeerOperator(s.GetName(), cluster, region, operator.OpRegion, oldPeer.GetStoreId(), newPeer) if err != nil { shuffleRegionCreateOperatorFailCounter.Inc() return nil, nil diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 2031e232aee..6d3c4c4091f 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -39,8 +39,6 @@ import ( const ( // SplitBucketName is the split bucket name. SplitBucketName = "split-bucket-scheduler" - // SplitBucketType is the spilt bucket type. - SplitBucketType = "split-bucket" // defaultHotDegree is the default hot region threshold. defaultHotDegree = 3 defaultSplitLimit = 10 @@ -271,7 +269,7 @@ func (s *splitBucketScheduler) splitBucket(plan *splitBucketPlan) []*operator.Op if bytes.Compare(region.GetEndKey(), splitBucket.EndKey) > 0 { splitKey = append(splitKey, splitBucket.EndKey) } - op, err := operator.CreateSplitRegionOperator(SplitBucketType, region, operator.OpSplit, + op, err := operator.CreateSplitRegionOperator(s.GetName(), region, operator.OpSplit, pdpb.CheckPolicy_USEKEY, splitKey) if err != nil { splitBucketCreateOperatorFailCounter.Inc() diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index 8b6e9c39f1d..a8fb957bb09 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -30,8 +30,6 @@ import ( const ( // TransferWitnessLeaderName is transfer witness leader scheduler name. TransferWitnessLeaderName = "transfer-witness-leader-scheduler" - // TransferWitnessLeaderType is transfer witness leader scheduler type. 
- TransferWitnessLeaderType = "transfer-witness-leader" // TransferWitnessLeaderBatchSize is the number of operators to transfer // leaders by one scheduling transferWitnessLeaderBatchSize = 3 diff --git a/pkg/schedule/schedulers/transfer_witness_leader_test.go b/pkg/schedule/schedulers/transfer_witness_leader_test.go index 1da968d8dc2..046b7aeb53f 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader_test.go +++ b/pkg/schedule/schedulers/transfer_witness_leader_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/operator" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" ) @@ -38,7 +39,7 @@ func TestTransferWitnessLeader(t *testing.T) { // Add regions 1 with leader in stores 1 tc.AddLeaderRegion(1, 1, 2, 3) - sl, err := CreateScheduler(TransferWitnessLeaderType, oc, storage.NewStorageWithMemoryBackend(), nil) + sl, err := CreateScheduler(types.TransferWitnessLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) RecvRegionInfo(sl) <- tc.GetRegion(1) re.True(sl.IsScheduleAllowed(tc)) @@ -53,7 +54,7 @@ func TestTransferWitnessLeaderWithUnhealthyPeer(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sl, err := CreateScheduler(TransferWitnessLeaderType, oc, storage.NewStorageWithMemoryBackend(), nil) + sl, err := CreateScheduler(types.TransferWitnessLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) // Add stores 1, 2, 3 diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index d6932c4c63e..0d7f9fdb1ec 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -227,7 +227,7 @@ func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ( if target == nil { continue } - op, err := operator.CreateTransferLeaderOperator(EvictLeaderType, cluster, region, target.GetID(), []uint64{}, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, target.GetID(), []uint64{}, operator.OpLeader) if err != nil { log.Debug("fail to create evict leader operator", errs.ZapError(err)) continue diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index cd7f94e001a..c230339f079 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -52,6 +52,7 @@ import ( "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/schedule/schedulers" + types "github.com/tikv/pd/pkg/schedule/type" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" @@ -328,7 +329,7 @@ func TestSetOfflineWithReplica(t *testing.T) { func addEvictLeaderScheduler(cluster *RaftCluster, storeID uint64) (evictScheduler schedulers.Scheduler, err error) { args := []string{fmt.Sprintf("%d", storeID)} - evictScheduler, err = schedulers.CreateScheduler(schedulers.EvictLeaderType, cluster.GetOperatorController(), cluster.storage, schedulers.ConfigSliceDecoder(schedulers.EvictLeaderType, args), cluster.GetCoordinator().GetSchedulersController().RemoveScheduler) + evictScheduler, err = schedulers.CreateScheduler(types.EvictLeaderScheduler, cluster.GetOperatorController(), cluster.storage, schedulers.ConfigSliceDecoder(types.EvictLeaderScheduler, args),
cluster.GetCoordinator().GetSchedulersController().RemoveScheduler) if err != nil { return } @@ -3046,7 +3047,7 @@ func TestAddScheduler(t *testing.T) { oc := co.GetOperatorController() // test ConfigJSONDecoder create - bl, err := schedulers.CreateScheduler(schedulers.BalanceLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigJSONDecoder([]byte("{}"))) + bl, err := schedulers.CreateScheduler(types.BalanceLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigJSONDecoder([]byte("{}"))) re.NoError(err) conf, err := bl.EncodeConfig() re.NoError(err) @@ -3055,16 +3056,16 @@ func TestAddScheduler(t *testing.T) { re.NoError(err) batch := data["batch"].(float64) re.Equal(4, int(batch)) - gls, err := schedulers.CreateScheduler(schedulers.GrantLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"0"}), controller.RemoveScheduler) + gls, err := schedulers.CreateScheduler(types.GrantLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(types.GrantLeaderScheduler, []string{"0"}), controller.RemoveScheduler) re.NoError(err) re.Error(controller.AddScheduler(gls)) re.Error(controller.RemoveScheduler(gls.GetName())) - gls, err = schedulers.CreateScheduler(schedulers.GrantLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}), controller.RemoveScheduler) + gls, err = schedulers.CreateScheduler(types.GrantLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(types.GrantLeaderScheduler, []string{"1"}), controller.RemoveScheduler) re.NoError(err) re.NoError(controller.AddScheduler(gls)) - hb, err := schedulers.CreateScheduler(schedulers.HotRegionType, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigJSONDecoder([]byte("{}"))) + hb, err := schedulers.CreateScheduler(types.BalanceHotRegionScheduler, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigJSONDecoder([]byte("{}"))) re.NoError(err) conf, err = hb.EncodeConfig() re.NoError(err) @@ -3107,10 +3108,10 @@ func TestPersistScheduler(t *testing.T) { oc := co.GetOperatorController() storage := tc.RaftCluster.storage - gls1, err := schedulers.CreateScheduler(schedulers.GrantLeaderType, oc, storage, schedulers.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}), controller.RemoveScheduler) + gls1, err := schedulers.CreateScheduler(types.GrantLeaderScheduler, oc, storage, schedulers.ConfigSliceDecoder(types.GrantLeaderScheduler, []string{"1"}), controller.RemoveScheduler) re.NoError(err) re.NoError(controller.AddScheduler(gls1, "1")) - evict, err := schedulers.CreateScheduler(schedulers.EvictLeaderType, oc, storage, schedulers.ConfigSliceDecoder(schedulers.EvictLeaderType, []string{"2"}), controller.RemoveScheduler) + evict, err := schedulers.CreateScheduler(types.EvictLeaderScheduler, oc, storage, schedulers.ConfigSliceDecoder(types.EvictLeaderScheduler, []string{"2"}), controller.RemoveScheduler) re.NoError(err) re.NoError(controller.AddScheduler(evict, "2")) re.Len(controller.GetSchedulerNames(), defaultCount+2) @@ -3133,11 +3134,13 @@ func TestPersistScheduler(t *testing.T) { // whether the schedulers added or removed in dynamic way are recorded in opt _, newOpt, err := newTestScheduleConfig() re.NoError(err) - shuffle, err := schedulers.CreateScheduler(schedulers.ShuffleRegionType, oc, storage, schedulers.ConfigJSONDecoder([]byte("null"))) + shuffle, err := 
schedulers.CreateScheduler(types.ShuffleRegionScheduler, oc, storage, schedulers.ConfigJSONDecoder([]byte("null"))) re.NoError(err) re.NoError(controller.AddScheduler(shuffle)) // suppose we add a new default enable scheduler - sc.DefaultSchedulers = append(sc.DefaultSchedulers, sc.SchedulerConfig{Type: "shuffle-region"}) + sc.DefaultSchedulers = append(sc.DefaultSchedulers, sc.SchedulerConfig{ + Type: types.SchedulerTypeCompatibleMap[types.ShuffleRegionScheduler], + }) defer func() { sc.DefaultSchedulers = sc.DefaultSchedulers[:len(sc.DefaultSchedulers)-1] }() @@ -3169,10 +3172,10 @@ func TestPersistScheduler(t *testing.T) { co.Run() controller = co.GetSchedulersController() re.Len(controller.GetSchedulerNames(), 3) - bls, err := schedulers.CreateScheduler(schedulers.BalanceLeaderType, oc, storage, schedulers.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""})) + bls, err := schedulers.CreateScheduler(types.BalanceLeaderScheduler, oc, storage, schedulers.ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""})) re.NoError(err) re.NoError(controller.AddScheduler(bls)) - brs, err := schedulers.CreateScheduler(schedulers.BalanceRegionType, oc, storage, schedulers.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""})) + brs, err := schedulers.CreateScheduler(types.BalanceRegionScheduler, oc, storage, schedulers.ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) re.NoError(controller.AddScheduler(brs)) re.Len(controller.GetSchedulerNames(), 5) @@ -3221,7 +3224,7 @@ func TestRemoveScheduler(t *testing.T) { oc := co.GetOperatorController() storage := tc.RaftCluster.storage - gls1, err := schedulers.CreateScheduler(schedulers.GrantLeaderType, oc, storage, schedulers.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}), controller.RemoveScheduler) + gls1, err := schedulers.CreateScheduler(types.GrantLeaderScheduler, oc, storage, schedulers.ConfigSliceDecoder(types.GrantLeaderScheduler, []string{"1"}), controller.RemoveScheduler) re.NoError(err) re.NoError(controller.AddScheduler(gls1, "1")) re.Len(controller.GetSchedulerNames(), defaultCount+1) @@ -3407,7 +3410,7 @@ func TestStoreOverloaded(t *testing.T) { tc, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() oc := co.GetOperatorController() - lb, err := schedulers.CreateScheduler(schedulers.BalanceRegionType, oc, tc.storage, schedulers.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""})) + lb, err := schedulers.CreateScheduler(types.BalanceRegionScheduler, oc, tc.storage, schedulers.ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) opt := tc.GetOpts() re.NoError(tc.addRegionStore(4, 100)) @@ -3461,7 +3464,7 @@ func TestStoreOverloadedWithReplace(t *testing.T) { tc, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() oc := co.GetOperatorController() - lb, err := schedulers.CreateScheduler(schedulers.BalanceRegionType, oc, tc.storage, schedulers.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""})) + lb, err := schedulers.CreateScheduler(types.BalanceRegionScheduler, oc, tc.storage, schedulers.ConfigSliceDecoder(types.BalanceRegionScheduler, []string{"", ""})) re.NoError(err) re.NoError(tc.addRegionStore(4, 100)) @@ -3554,7 +3557,7 @@ func TestController(t *testing.T) { re.NoError(tc.addLeaderRegion(1, 1)) re.NoError(tc.addLeaderRegion(2, 2)) - scheduler, err := schedulers.CreateScheduler(schedulers.BalanceLeaderType, oc, storage.NewStorageWithMemoryBackend(), 
-	scheduler, err := schedulers.CreateScheduler(schedulers.BalanceLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""}))
+	scheduler, err := schedulers.CreateScheduler(types.BalanceLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""}))
 	re.NoError(err)
 	lb := &mockLimitScheduler{
 		Scheduler: scheduler,
@@ -3640,7 +3643,7 @@ func TestInterval(t *testing.T) {
 	tc, co, cleanup := prepare(nil, nil, nil, re)
 	defer cleanup()
 
-	lb, err := schedulers.CreateScheduler(schedulers.BalanceLeaderType, co.GetOperatorController(), storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""}))
+	lb, err := schedulers.CreateScheduler(types.BalanceLeaderScheduler, co.GetOperatorController(), storage.NewStorageWithMemoryBackend(), schedulers.ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""}))
 	re.NoError(err)
 	sc := schedulers.NewScheduleController(tc.ctx, co.GetCluster(), co.GetOperatorController(), lb)
diff --git a/tests/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go
index 62b7db9d958..f3b8509d2e5 100644
--- a/tests/server/api/scheduler_test.go
+++ b/tests/server/api/scheduler_test.go
@@ -29,6 +29,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 	sc "github.com/tikv/pd/pkg/schedule/config"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/slice"
 	"github.com/tikv/pd/pkg/utils/apiutil"
 	tu "github.com/tikv/pd/pkg/utils/testutil"
@@ -656,7 +657,10 @@ func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) {
 	re.NoError(err)
 
 	originSchedulers := scheduleConfig.Schedulers
-	scheduleConfig.Schedulers = sc.SchedulerConfigs{sc.SchedulerConfig{Type: "shuffle-leader", Disable: true}}
+	scheduleConfig.Schedulers = sc.SchedulerConfigs{sc.SchedulerConfig{
+		Type:    types.SchedulerTypeCompatibleMap[types.ShuffleLeaderScheduler],
+		Disable: true,
+	}}
 	body, err = json.Marshal(scheduleConfig)
 	re.NoError(err)
 	err = tu.CheckPostJSON(tests.TestDialClient, u, body, tu.StatusOK(re))
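The whole of PATCH 03 follows one mechanical pattern: every `schedulers.CreateScheduler` call site swaps the old string constant from the `schedulers` package for a typed constant from `pkg/schedule/type`, and the same constant is passed to `schedulers.ConfigSliceDecoder` so the decoder lookup stays in sync. A minimal sketch of the new call shape; the helper name and the in-memory storage are illustrative only, while the calls themselves are taken from the tests above:

    import (
    	"github.com/tikv/pd/pkg/schedule/operator"
    	"github.com/tikv/pd/pkg/schedule/schedulers"
    	types "github.com/tikv/pd/pkg/schedule/type"
    	"github.com/tikv/pd/pkg/storage"
    )

    // newBalanceLeaderForTest is a hypothetical helper: the scheduler is now
    // identified by a types.CheckerSchedulerType constant, and the config
    // decoder is resolved by that same typed key.
    func newBalanceLeaderForTest(oc *operator.Controller) (schedulers.Scheduler, error) {
    	return schedulers.CreateScheduler(
    		types.BalanceLeaderScheduler, // previously schedulers.BalanceLeaderType
    		oc,
    		storage.NewStorageWithMemoryBackend(),
    		schedulers.ConfigSliceDecoder(types.BalanceLeaderScheduler, []string{"", ""}),
    	)
    }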
From 1f20ae6df0c32df8221867320775bdbf1ba603a5 Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Mon, 5 Aug 2024 15:11:51 +0800
Subject: [PATCH 04/12] fix lint

Signed-off-by: okJiang <819421878@qq.com>
---
 pkg/schedule/schedulers/hot_region.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go
index 2f33dabea9f..717c1413ac4 100644
--- a/pkg/schedule/schedulers/hot_region.go
+++ b/pkg/schedule/schedulers/hot_region.go
@@ -46,7 +46,7 @@ import (
 
 const (
 	// HotRegionName is balance hot region scheduler name.
-	HotRegionName = "balance-hot-region-scheduler"
+	HotRegionName        = "balance-hot-region-scheduler"
 	splitHotReadBuckets  = "split-hot-read-region"
 	splitHotWriteBuckets = "split-hot-write-region"
 	splitProgressiveRank = 5

From 3ada3f5b201ffdc67f3c02be59cf19b86339cd11 Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Mon, 5 Aug 2024 15:17:55 +0800
Subject: [PATCH 05/12] fix ci

Signed-off-by: okJiang <819421878@qq.com>
---
 plugin/scheduler_example/evict_leader.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go
index f5b84dc6663..c7842debdcb 100644
--- a/plugin/scheduler_example/evict_leader.go
+++ b/plugin/scheduler_example/evict_leader.go
@@ -48,7 +48,7 @@ const (
 )
 
 func init() {
-	schedulers.RegisterSliceDecoderBuilder(UserEvictLeaderScheduler, func(args []string) schedulers.ConfigDecoder {
+	schedulers.RegisterSliceDecoderBuilder(userEvictLeaderScheduler, func(args []string) schedulers.ConfigDecoder {
 		return func(v any) error {
 			if len(args) != 1 {
 				return errors.New("should specify the store-id")
@@ -71,7 +71,7 @@ func init() {
 		}
 	})
 
-	schedulers.RegisterScheduler(UserEvictLeaderScheduler, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder schedulers.ConfigDecoder, _ ...func(string) error) (schedulers.Scheduler, error) {
+	schedulers.RegisterScheduler(userEvictLeaderScheduler, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder schedulers.ConfigDecoder, _ ...func(string) error) (schedulers.Scheduler, error) {
 		conf := &evictLeaderSchedulerConfig{StoreIDWitRanges: make(map[uint64][]core.KeyRange), storage: storage}
 		if err := decoder(conf); err != nil {
 			return nil, err
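PATCH 05 is the out-of-tree side of the same migration: the example plugin's registration key changes from an exported string constant to an unexported typed one, because `RegisterSliceDecoderBuilder` and `RegisterScheduler` now key on a `types.CheckerSchedulerType`. A condensed sketch of the registration flow; the constant's value and the constructor name are assumptions, and only the two registration calls are taken from the diff:

    // Assumed declaration; the real constant is defined elsewhere in evict_leader.go.
    const userEvictLeaderScheduler types.CheckerSchedulerType = "user-evict-leader-scheduler"

    func init() {
    	// Both registrations must use the same typed key, otherwise
    	// CreateScheduler cannot pair the decoder with the constructor.
    	schedulers.RegisterSliceDecoderBuilder(userEvictLeaderScheduler, func(args []string) schedulers.ConfigDecoder {
    		return func(v any) error {
    			if len(args) != 1 {
    				return errors.New("should specify the store-id")
    			}
    			// ... decode args into the config value v ...
    			return nil
    		}
    	})
    	schedulers.RegisterScheduler(userEvictLeaderScheduler, func(opController *operator.Controller,
    		storage endpoint.ConfigStorage, decoder schedulers.ConfigDecoder, _ ...func(string) error) (schedulers.Scheduler, error) {
    		conf := &evictLeaderSchedulerConfig{StoreIDWitRanges: make(map[uint64][]core.KeyRange), storage: storage}
    		if err := decoder(conf); err != nil {
    			return nil, err
    		}
    		// ... build and return the plugin scheduler from conf ...
    		return newEvictLeaderScheduler(opController, conf), nil // hypothetical constructor
    	})
    }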
From 912cde3751016e422841b7495ba52201f2fac7b6 Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Mon, 5 Aug 2024 16:18:19 +0800
Subject: [PATCH 06/12] rename variable

Signed-off-by: okJiang <819421878@qq.com>
---
 pkg/mcs/scheduling/server/cluster.go | 2 +-
 pkg/schedule/coordinator.go          | 4 ++--
 pkg/schedule/type/type.go            | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go
index e933643a237..92b6e5211b5 100644
--- a/pkg/mcs/scheduling/server/cluster.go
+++ b/pkg/mcs/scheduling/server/cluster.go
@@ -309,7 +309,7 @@ func (c *Cluster) updateScheduler() {
 	)
 	// Create the newly added schedulers.
 	for _, scheduler := range latestSchedulersConfig {
-		schedulerType := types.SchedulerTypeCompatibleMapReverse[scheduler.Type]
+		schedulerType := types.ConvertOldStr2Type[scheduler.Type]
 		s, err := schedulers.CreateScheduler(
 			schedulerType,
 			c.coordinator.GetOperatorController(),
diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go
index 82beb91d9b3..f614f79364a 100644
--- a/pkg/schedule/coordinator.go
+++ b/pkg/schedule/coordinator.go
@@ -320,11 +320,11 @@ func (c *Coordinator) InitSchedulers(needRun bool) {
 			continue
 		}
 
-		tp := types.SchedulerStr2Type[schedulerCfg.Type]
+		tp := types.ConvertOldStr2Type[schedulerCfg.Type]
 		s, err := schedulers.CreateScheduler(tp, c.opController,
 			c.cluster.GetStorage(), schedulers.ConfigSliceDecoder(tp, schedulerCfg.Args), c.schedulers.RemoveScheduler)
 		if err != nil {
-			log.Error("can not create scheduler", zap.Stringer("scheduler-type", tp),
+			log.Error("can not create scheduler", zap.Stringer("type", tp),
 				zap.String("scheduler-type", schedulerCfg.Type), zap.Strings("scheduler-args", schedulerCfg.Args), errs.ZapError(err))
 			continue
 		}
diff --git a/pkg/schedule/type/type.go b/pkg/schedule/type/type.go
index 1bfe7dcdeca..543f0229ce8 100644
--- a/pkg/schedule/type/type.go
+++ b/pkg/schedule/type/type.go
@@ -98,11 +98,11 @@ var (
 		LabelScheduler:            "label",
 	}
 
-	// SchedulerTypeCompatibleMapReverse exists for compatibility.
+	// ConvertOldStr2Type exists for compatibility.
 	//
 	// It is used in the `PersistOptions` and `PersistConfig`. These two structs
 	// are persisted in the storage, so we need to keep the compatibility.
-	SchedulerTypeCompatibleMapReverse = map[string]CheckerSchedulerType{
+	ConvertOldStr2Type = map[string]CheckerSchedulerType{
 		"balance-leader":  BalanceLeaderScheduler,
 		"balance-region":  BalanceRegionScheduler,
 		"balance-witness": BalanceWitnessScheduler,

From 95fc46bae25a7b37c5ddeba18e74297c1c05d639 Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Mon, 5 Aug 2024 16:53:30 +0800
Subject: [PATCH 07/12] fix unit test

Signed-off-by: okJiang <819421878@qq.com>
---
 pkg/schedule/coordinator.go          | 4 ++--
 pkg/schedule/schedulers/scheduler.go | 7 ++++---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go
index f614f79364a..6c15ff24d9c 100644
--- a/pkg/schedule/coordinator.go
+++ b/pkg/schedule/coordinator.go
@@ -276,7 +276,7 @@ func (c *Coordinator) InitSchedulers(needRun bool) {
 		typ := schedulers.FindSchedulerTypeByName(name)
 		var cfg sc.SchedulerConfig
 		for _, c := range scheduleCfg.Schedulers {
-			if c.Type == typ {
+			if c.Type == types.SchedulerTypeCompatibleMap[typ] {
 				cfg = c
 				break
 			}
@@ -289,7 +289,7 @@ func (c *Coordinator) InitSchedulers(needRun bool) {
 			log.Info("skip create scheduler with independent configuration", zap.String("scheduler-name", name), zap.String("scheduler-type", cfg.Type), zap.Strings("scheduler-args", cfg.Args))
 			continue
 		}
-		s, err := schedulers.CreateScheduler(types.SchedulerStr2Type[cfg.Type], c.opController,
+		s, err := schedulers.CreateScheduler(types.ConvertOldStr2Type[cfg.Type], c.opController,
 			c.cluster.GetStorage(), schedulers.ConfigJSONDecoder([]byte(data)), c.schedulers.RemoveScheduler)
 		if err != nil {
 			log.Error("can not create scheduler with independent configuration", zap.String("scheduler-name", name), zap.Strings("scheduler-args", cfg.Args), errs.ZapError(err))
diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go
index dde25789c61..7fce5d9c46e 100644
--- a/pkg/schedule/schedulers/scheduler.go
+++ b/pkg/schedule/schedulers/scheduler.go
@@ -162,12 +162,13 @@ func SaveSchedulerConfig(storage endpoint.ConfigStorage, s Scheduler) error {
 }
 
 // FindSchedulerTypeByName finds the type of the specified name.
-func FindSchedulerTypeByName(name string) string {
-	var typ string
+func FindSchedulerTypeByName(name string) types.CheckerSchedulerType {
+	var typ types.CheckerSchedulerType
 	for registeredType := range schedulerMap {
 		if strings.Contains(name, registeredType.String()) {
+			// If the name matches multiple types, we should choose the longest one.
 			if len(registeredType) > len(typ) {
-				typ = registeredType.String()
+				typ = registeredType
 			}
 		}
 	}
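The comment added to `FindSchedulerTypeByName` in PATCH 07 is the subtle part of this commit: persisted scheduler names are matched by substring against every registered type, so when one registered type string is contained in another, only the longest containing match is correct. A self-contained toy version of the rule, using hypothetical type strings rather than PD's real registry:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // findTypeByName mirrors the longest-match rule from
    // pkg/schedule/schedulers/scheduler.go over a plain string slice.
    func findTypeByName(name string, registered []string) string {
    	var typ string
    	for _, t := range registered {
    		// Keep only the longest registered type contained in the name.
    		if strings.Contains(name, t) && len(t) > len(typ) {
    			typ = t
    		}
    	}
    	return typ
    }

    func main() {
    	// Hypothetical overlapping registrations: both match the name below,
    	// but only the longer one is the scheduler's actual type.
    	registered := []string{"leader-scheduler", "grant-leader-scheduler"}
    	fmt.Println(findTypeByName("grant-leader-scheduler-1", registered)) // grant-leader-scheduler
    }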
From d227f56484d8272445921952bb2703c43c68d113 Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Mon, 5 Aug 2024 18:02:14 +0800
Subject: [PATCH 08/12] remove duplicate

Signed-off-by: okJiang <819421878@qq.com>
---
 pkg/mock/mockconfig/mockconfig.go | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/pkg/mock/mockconfig/mockconfig.go b/pkg/mock/mockconfig/mockconfig.go
index f8ccbf02066..5d3a86e2993 100644
--- a/pkg/mock/mockconfig/mockconfig.go
+++ b/pkg/mock/mockconfig/mockconfig.go
@@ -20,20 +20,11 @@ import (
 	"github.com/tikv/pd/server/config"
 )
 
-// oldName2NewType is used to convert old scheduler name to new scheduler type.
-// It is just used for testing.
-var oldName2NewType = map[string]types.CheckerSchedulerType{
-	"balance-leader":   types.BalanceLeaderScheduler,
-	"balance-region":   types.BalanceRegionScheduler,
-	"hot-region":       types.BalanceHotRegionScheduler,
-	"evict-slow-store": types.EvictLeaderScheduler,
-}
-
 // NewTestOptions creates default options for testing.
 func NewTestOptions() *config.PersistOptions {
 	// register default schedulers in case config check fail.
 	for _, d := range sc.DefaultSchedulers {
-		sc.RegisterScheduler(oldName2NewType[d.Type])
+		sc.RegisterScheduler(types.ConvertOldStr2Type[d.Type])
 	}
 	c := config.NewConfig()
 	c.Adjust(nil, false)

From f91836ae6ae2bba4234c58d31f75fffcc347879d Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Wed, 7 Aug 2024 10:44:24 +0800
Subject: [PATCH 09/12] add some comment

Signed-off-by: okJiang <819421878@qq.com>
---
 pkg/schedule/type/type.go | 9 +++++----
 server/handler.go         | 2 --
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/pkg/schedule/type/type.go b/pkg/schedule/type/type.go
index 543f0229ce8..cce3b36b23b 100644
--- a/pkg/schedule/type/type.go
+++ b/pkg/schedule/type/type.go
@@ -73,11 +73,13 @@ const (
 	LabelScheduler CheckerSchedulerType = "label-scheduler"
 )
 
+// TODO: SchedulerTypeCompatibleMap and ConvertOldStr2Type should be removed after
+// fixing this issue(https://github.com/tikv/pd/issues/8474).
 var (
 	// SchedulerTypeCompatibleMap exists for compatibility.
 	//
-	// It is used in the `PersistOptions` and `PersistConfig`. These two structs
-	// are persisted in the storage, so we need to keep the compatibility.
+	// It is used for `SchedulerConfig` in the `PersistOptions` and `PersistConfig`.
+	// These two structs are persisted in the storage, so we need to keep the compatibility.
 	SchedulerTypeCompatibleMap = map[CheckerSchedulerType]string{
 		BalanceLeaderScheduler: "balance-leader",
 		BalanceRegionScheduler: "balance-region",
@@ -100,8 +102,7 @@ var (
 
 	// ConvertOldStr2Type exists for compatibility.
 	//
-	// It is used in the `PersistOptions` and `PersistConfig`. These two structs
-	// are persisted in the storage, so we need to keep the compatibility.
+	// It is used to convert the old scheduler type to `CheckerSchedulerType`.
 	ConvertOldStr2Type = map[string]CheckerSchedulerType{
 		"balance-leader": BalanceLeaderScheduler,
 		"balance-region": BalanceRegionScheduler,
diff --git a/server/handler.go b/server/handler.go
index 6c99aff5e7e..995f98a3324 100644
--- a/server/handler.go
+++ b/server/handler.go
@@ -188,8 +188,6 @@ func (h *Handler) GetAllRequestHistoryHotRegion(request *HistoryHotRegionsReques
 
 // AddScheduler adds a scheduler.
 func (h *Handler) AddScheduler(tp types.CheckerSchedulerType, args ...string) error {
-	// TODO: remove this map in subsequent PRs, because we need use new type in the `CreateScheduler`.
-	// name := types.SchedulerTypeCompatibleMap[tp]
 	c, err := h.GetRaftCluster()
 	if err != nil {
 		return err
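After PATCH 09 the two maps form an explicit round trip: `SchedulerTypeCompatibleMap` translates a `CheckerSchedulerType` into the short legacy string that `PersistOptions`/`PersistConfig` still persist, and `ConvertOldStr2Type` translates that string back when a persisted config is loaded. A sketch of that round trip; `resolvePersistedType` is a hypothetical helper, not a function in this PR:

    // resolvePersistedType turns a legacy persisted type string such as
    // "balance-leader" into the typed constant, and also returns the string
    // it would be persisted as again.
    func resolvePersistedType(cfgType string) (types.CheckerSchedulerType, string, bool) {
    	tp, ok := types.ConvertOldStr2Type[cfgType]
    	if !ok {
    		return "", "", false // unknown legacy type string
    	}
    	// The round trip must reproduce cfgType, which is why both maps have to
    	// stay in sync until https://github.com/tikv/pd/issues/8474 is resolved.
    	return tp, types.SchedulerTypeCompatibleMap[tp], true
    }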
From 7d383836ba6f8c385fe6e8c7f16327c464e5c389 Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Wed, 7 Aug 2024 13:11:58 +0800
Subject: [PATCH 10/12] fix comment

Signed-off-by: okJiang <819421878@qq.com>
---
 pkg/schedule/type/type.go | 4 ++--
 server/api/scheduler.go   | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/schedule/type/type.go b/pkg/schedule/type/type.go
index cce3b36b23b..501076b604d 100644
--- a/pkg/schedule/type/type.go
+++ b/pkg/schedule/type/type.go
@@ -123,8 +123,8 @@ var (
 		"label":           LabelScheduler,
 	}
 
-	// SchedulerStr2Type is a map to convert the scheduler string to the CheckerSchedulerType.
-	SchedulerStr2Type = map[string]CheckerSchedulerType{
+	// StringToSchedulerType is a map to convert the scheduler string to the CheckerSchedulerType.
+	StringToSchedulerType = map[string]CheckerSchedulerType{
 		"balance-leader-scheduler":  BalanceLeaderScheduler,
 		"balance-region-scheduler":  BalanceRegionScheduler,
 		"balance-witness-scheduler": BalanceWitnessScheduler,
diff --git a/server/api/scheduler.go b/server/api/scheduler.go
index e8b9b54380c..306f67ae058 100644
--- a/server/api/scheduler.go
+++ b/server/api/scheduler.go
@@ -85,7 +85,7 @@ func (h *schedulerHandler) CreateScheduler(w http.ResponseWriter, r *http.Reques
 		return
 	}
-	tp, ok := types.SchedulerStr2Type[name]
+	tp, ok := types.StringToSchedulerType[name]
 	if !ok {
 		h.r.JSON(w, http.StatusBadRequest, "unknown scheduler")
 		return
 	}
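PATCH 10 settles the third naming domain: `StringToSchedulerType` maps the full user-facing scheduler names (such as "balance-leader-scheduler") that arrive through the HTTP API, as opposed to the short legacy strings handled by the compatibility maps. A sketch of the handler-side lookup; the helper name is illustrative, while the map access and the error message come from the diff:

    // schedulerTypeFromName converts an API-supplied scheduler name into the
    // typed constant, rejecting names that are not registered.
    func schedulerTypeFromName(name string) (types.CheckerSchedulerType, error) {
    	tp, ok := types.StringToSchedulerType[name]
    	if !ok {
    		// The real handler answers this case with HTTP 400.
    		return "", errors.New("unknown scheduler")
    	}
    	return tp, nil
    }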
From bdb4d19eabcc40613566b3356b33d6492c863e40 Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Thu, 8 Aug 2024 14:48:43 +0800
Subject: [PATCH 11/12] fix comment: rename

Signed-off-by: okJiang <819421878@qq.com>
---
 pkg/mcs/scheduling/server/cluster.go | 2 +-
 pkg/mock/mockconfig/mockconfig.go    | 2 +-
 pkg/schedule/coordinator.go          | 4 ++--
 pkg/schedule/type/type.go            | 6 +++---
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go
index 92b6e5211b5..a5a3a709184 100644
--- a/pkg/mcs/scheduling/server/cluster.go
+++ b/pkg/mcs/scheduling/server/cluster.go
@@ -309,7 +309,7 @@ func (c *Cluster) updateScheduler() {
 	)
 	// Create the newly added schedulers.
 	for _, scheduler := range latestSchedulersConfig {
-		schedulerType := types.ConvertOldStr2Type[scheduler.Type]
+		schedulerType := types.ConvertOldStrToType[scheduler.Type]
 		s, err := schedulers.CreateScheduler(
 			schedulerType,
 			c.coordinator.GetOperatorController(),
diff --git a/pkg/mock/mockconfig/mockconfig.go b/pkg/mock/mockconfig/mockconfig.go
index 5d3a86e2993..ccd1f98154d 100644
--- a/pkg/mock/mockconfig/mockconfig.go
+++ b/pkg/mock/mockconfig/mockconfig.go
@@ -24,7 +24,7 @@ import (
 func NewTestOptions() *config.PersistOptions {
 	// register default schedulers in case config check fail.
 	for _, d := range sc.DefaultSchedulers {
-		sc.RegisterScheduler(types.ConvertOldStr2Type[d.Type])
+		sc.RegisterScheduler(types.ConvertOldStrToType[d.Type])
 	}
 	c := config.NewConfig()
 	c.Adjust(nil, false)
diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go
index 6c15ff24d9c..89c99ac90b8 100644
--- a/pkg/schedule/coordinator.go
+++ b/pkg/schedule/coordinator.go
@@ -289,7 +289,7 @@ func (c *Coordinator) InitSchedulers(needRun bool) {
 			log.Info("skip create scheduler with independent configuration", zap.String("scheduler-name", name), zap.String("scheduler-type", cfg.Type), zap.Strings("scheduler-args", cfg.Args))
 			continue
 		}
-		s, err := schedulers.CreateScheduler(types.ConvertOldStr2Type[cfg.Type], c.opController,
+		s, err := schedulers.CreateScheduler(types.ConvertOldStrToType[cfg.Type], c.opController,
 			c.cluster.GetStorage(), schedulers.ConfigJSONDecoder([]byte(data)), c.schedulers.RemoveScheduler)
 		if err != nil {
 			log.Error("can not create scheduler with independent configuration", zap.String("scheduler-name", name), zap.Strings("scheduler-args", cfg.Args), errs.ZapError(err))
@@ -320,7 +320,7 @@ func (c *Coordinator) InitSchedulers(needRun bool) {
 			continue
 		}
 
-		tp := types.ConvertOldStr2Type[schedulerCfg.Type]
+		tp := types.ConvertOldStrToType[schedulerCfg.Type]
 		s, err := schedulers.CreateScheduler(tp, c.opController,
 			c.cluster.GetStorage(), schedulers.ConfigSliceDecoder(tp, schedulerCfg.Args), c.schedulers.RemoveScheduler)
 		if err != nil {
diff --git a/pkg/schedule/type/type.go b/pkg/schedule/type/type.go
index 501076b604d..b7e0b26482e 100644
--- a/pkg/schedule/type/type.go
+++ b/pkg/schedule/type/type.go
@@ -73,7 +73,7 @@ const (
 	LabelScheduler CheckerSchedulerType = "label-scheduler"
 )
 
-// TODO: SchedulerTypeCompatibleMap and ConvertOldStr2Type should be removed after
+// TODO: SchedulerTypeCompatibleMap and ConvertOldStrToType should be removed after
 // fixing this issue(https://github.com/tikv/pd/issues/8474).
 var (
 	// SchedulerTypeCompatibleMap exists for compatibility.
@@ -100,10 +100,10 @@ var (
 		LabelScheduler:            "label",
 	}
 
-	// ConvertOldStr2Type exists for compatibility.
+	// ConvertOldStrToType exists for compatibility.
 	//
 	// It is used to convert the old scheduler type to `CheckerSchedulerType`.
-	ConvertOldStr2Type = map[string]CheckerSchedulerType{
+	ConvertOldStrToType = map[string]CheckerSchedulerType{
 		"balance-leader":  BalanceLeaderScheduler,
 		"balance-region":  BalanceRegionScheduler,
 		"balance-witness": BalanceWitnessScheduler,

From ebcc65575cb37525675ab4f3eb3ccc5d8bbca6b4 Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Thu, 8 Aug 2024 17:09:21 +0800
Subject: [PATCH 12/12] trigger ci

Signed-off-by: okJiang <819421878@qq.com>