diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go
index c86c739f724..955af4b9b4a 100644
--- a/pkg/mcs/scheduling/server/cluster.go
+++ b/pkg/mcs/scheduling/server/cluster.go
@@ -27,6 +27,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/scatter"
 	"github.com/tikv/pd/pkg/schedule/schedulers"
 	"github.com/tikv/pd/pkg/schedule/splitter"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/slice"
 	"github.com/tikv/pd/pkg/statistics"
 	"github.com/tikv/pd/pkg/statistics/buckets"
@@ -343,8 +344,9 @@ func (c *Cluster) updateScheduler() {
 	// Remove the deleted schedulers.
 	for _, name := range schedulersController.GetSchedulerNames() {
 		scheduler := schedulersController.GetScheduler(name)
+		oldType := types.SchedulerTypeCompatibleMap[scheduler.GetType()]
 		if slice.AnyOf(latestSchedulersConfig, func(i int) bool {
-			return latestSchedulersConfig[i].Type == scheduler.GetType()
+			return latestSchedulersConfig[i].Type == oldType
 		}) {
 			continue
 		}
diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go
index 2111aa3ddcc..ac59de5b97a 100644
--- a/pkg/mcs/scheduling/server/config/config.go
+++ b/pkg/mcs/scheduling/server/config/config.go
@@ -36,6 +36,7 @@ import (
 	"github.com/tikv/pd/pkg/core/storelimit"
 	"github.com/tikv/pd/pkg/mcs/utils"
 	sc "github.com/tikv/pd/pkg/schedule/config"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/slice"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"github.com/tikv/pd/pkg/utils/configutil"
@@ -646,10 +647,11 @@ func (o *PersistConfig) SetMaxReplicas(replicas int) {
 }
 
 // IsSchedulerDisabled returns if the scheduler is disabled.
-func (o *PersistConfig) IsSchedulerDisabled(t string) bool {
+func (o *PersistConfig) IsSchedulerDisabled(tp types.CheckerSchedulerType) bool {
+	oldType := types.SchedulerTypeCompatibleMap[tp]
 	schedulers := o.GetScheduleConfig().Schedulers
 	for _, s := range schedulers {
-		if t == s.Type {
+		if oldType == s.Type {
 			return s.Disable
 		}
 	}
@@ -739,11 +741,11 @@ func (o *PersistConfig) IsRaftKV2() bool {
 
 // AddSchedulerCfg adds the scheduler configurations.
 // This method is a no-op since we only use configurations derived from one-way synchronization from API server now.
-func (*PersistConfig) AddSchedulerCfg(string, []string) {}
+func (*PersistConfig) AddSchedulerCfg(types.CheckerSchedulerType, []string) {}
 
 // RemoveSchedulerCfg removes the scheduler configurations.
 // This method is a no-op since we only use configurations derived from one-way synchronization from API server now.
-func (*PersistConfig) RemoveSchedulerCfg(string) {}
+func (*PersistConfig) RemoveSchedulerCfg(types.CheckerSchedulerType) {}
 
 // CheckLabelProperty checks if the label property is satisfied.
 func (*PersistConfig) CheckLabelProperty(string, []*metapb.StoreLabel) bool {
diff --git a/pkg/schedule/checker/checker_controller.go b/pkg/schedule/checker/checker_controller.go
index f772219558b..627408e6c43 100644
--- a/pkg/schedule/checker/checker_controller.go
+++ b/pkg/schedule/checker/checker_controller.go
@@ -241,7 +241,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator {
 			if opController.OperatorCount(operator.OpReplica) < c.conf.GetReplicaScheduleLimit() {
 				return []*operator.Operator{op}
 			}
-			operator.OperatorLimitCounter.WithLabelValues(c.ruleChecker.Name(), operator.OpReplica.String()).Inc()
+			operator.IncOperatorLimitCounter(c.ruleChecker.GetType(), operator.OpReplica)
 			c.pendingProcessedRegions.Put(region.GetID(), nil)
 		}
 	}
@@ -253,7 +253,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator {
 			if opController.OperatorCount(operator.OpReplica) < c.conf.GetReplicaScheduleLimit() {
 				return []*operator.Operator{op}
 			}
-			operator.OperatorLimitCounter.WithLabelValues(c.replicaChecker.Name(), operator.OpReplica.String()).Inc()
+			operator.IncOperatorLimitCounter(c.replicaChecker.GetType(), operator.OpReplica)
 			c.pendingProcessedRegions.Put(region.GetID(), nil)
 		}
 	}
@@ -270,7 +270,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator {
 	if c.mergeChecker != nil {
 		allowed := opController.OperatorCount(operator.OpMerge) < c.conf.GetMergeScheduleLimit()
 		if !allowed {
-			operator.OperatorLimitCounter.WithLabelValues(c.mergeChecker.GetType(), operator.OpMerge.String()).Inc()
+			operator.IncOperatorLimitCounter(c.mergeChecker.GetType(), operator.OpMerge)
 		} else if ops := c.mergeChecker.Check(region); ops != nil {
 			// It makes sure that two operators can be added successfully altogether.
 			return ops
diff --git a/pkg/schedule/checker/merge_checker.go b/pkg/schedule/checker/merge_checker.go
index 1a7548a1084..65189d35c1d 100644
--- a/pkg/schedule/checker/merge_checker.go
+++ b/pkg/schedule/checker/merge_checker.go
@@ -31,6 +31,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/labeler"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/placement"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/utils/logutil"
 )
@@ -69,8 +70,8 @@ func NewMergeChecker(ctx context.Context, cluster sche.CheckerCluster, conf conf
 }
 
 // GetType return MergeChecker's type
-func (*MergeChecker) GetType() string {
-	return "merge-checker"
+func (*MergeChecker) GetType() types.CheckerSchedulerType {
+	return types.MergeChecker
 }
 
 // RecordRegionSplit put the recently split region into cache. MergeChecker
diff --git a/pkg/schedule/checker/replica_checker.go b/pkg/schedule/checker/replica_checker.go
index f75ffe7e882..6be5432125b 100644
--- a/pkg/schedule/checker/replica_checker.go
+++ b/pkg/schedule/checker/replica_checker.go
@@ -61,6 +61,11 @@ func (*ReplicaChecker) Name() string {
 	return types.ReplicaChecker.String()
 }
 
+// GetType returns ReplicaChecker's type.
+func (*ReplicaChecker) GetType() types.CheckerSchedulerType {
+	return types.ReplicaChecker
+}
+
 // Check verifies a region's replicas, creating an operator.Operator if need.
 func (r *ReplicaChecker) Check(region *core.RegionInfo) *operator.Operator {
 	replicaCheckerCounter.Inc()
diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go
index a90de0a58d4..a8acb002951 100644
--- a/pkg/schedule/checker/rule_checker.go
+++ b/pkg/schedule/checker/rule_checker.go
@@ -75,6 +75,11 @@ func (*RuleChecker) Name() string {
 	return types.RuleChecker.String()
 }
 
+// GetType returns RuleChecker's type.
+func (*RuleChecker) GetType() types.CheckerSchedulerType {
+	return types.RuleChecker
+}
+
 // Check checks if the region matches placement rules and returns Operator to
 // fix it.
 func (c *RuleChecker) Check(region *core.RegionInfo) *operator.Operator {
diff --git a/pkg/schedule/config/config_provider.go b/pkg/schedule/config/config_provider.go
index 90e489f86f3..51ade0edb77 100644
--- a/pkg/schedule/config/config_provider.go
+++ b/pkg/schedule/config/config_provider.go
@@ -22,6 +22,7 @@ import (
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/tikv/pd/pkg/core/constant"
 	"github.com/tikv/pd/pkg/core/storelimit"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 )
@@ -49,9 +50,9 @@ type SchedulerConfigProvider interface {
 	SetSchedulingAllowanceStatus(bool, string)
 	GetStoresLimit() map[uint64]StoreLimitConfig
 
-	IsSchedulerDisabled(string) bool
-	AddSchedulerCfg(string, []string)
-	RemoveSchedulerCfg(string)
+	IsSchedulerDisabled(types.CheckerSchedulerType) bool
+	AddSchedulerCfg(types.CheckerSchedulerType, []string)
+	RemoveSchedulerCfg(types.CheckerSchedulerType)
 	Persist(endpoint.ConfigStorage) error
 
 	GetRegionScheduleLimit() uint64
diff --git a/pkg/schedule/operator/metrics.go b/pkg/schedule/operator/metrics.go
index 20bb4e6b7ca..74f9ddad0c7 100644
--- a/pkg/schedule/operator/metrics.go
+++ b/pkg/schedule/operator/metrics.go
@@ -14,7 +14,10 @@
 
 package operator
 
-import "github.com/prometheus/client_golang/prometheus"
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	types "github.com/tikv/pd/pkg/schedule/type"
+)
 
 var (
 	operatorStepDuration = prometheus.NewHistogramVec(
@@ -26,8 +29,7 @@ var (
 			Buckets: []float64{0.5, 1, 2, 4, 8, 16, 20, 40, 60, 90, 120, 180, 240, 300, 480, 600, 720, 900, 1200, 1800, 3600},
 		}, []string{"type"})
 
-	// OperatorLimitCounter exposes the counter when meeting limit.
-	OperatorLimitCounter = prometheus.NewCounterVec(
+	operatorLimitCounter = prometheus.NewCounterVec(
 		prometheus.CounterOpts{
 			Namespace: "pd",
 			Subsystem: "schedule",
@@ -82,10 +84,15 @@ var (
 
 func init() {
 	prometheus.MustRegister(operatorStepDuration)
-	prometheus.MustRegister(OperatorLimitCounter)
+	prometheus.MustRegister(operatorLimitCounter)
 	prometheus.MustRegister(OperatorExceededStoreLimitCounter)
 	prometheus.MustRegister(operatorCounter)
 	prometheus.MustRegister(operatorDuration)
 	prometheus.MustRegister(operatorSizeHist)
 	prometheus.MustRegister(storeLimitCostCounter)
 }
+
+// IncOperatorLimitCounter increases the counter when an operator meets the limit.
+func IncOperatorLimitCounter(typ types.CheckerSchedulerType, kind OpKind) {
+	operatorLimitCounter.WithLabelValues(typ.String(), kind.String()).Inc()
+}
diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go
index 899737536e2..6762c8751e4 100644
--- a/pkg/schedule/schedulers/balance_leader.go
+++ b/pkg/schedule/schedulers/balance_leader.go
@@ -166,7 +166,6 @@ func (handler *balanceLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.R
 type balanceLeaderScheduler struct {
 	*BaseScheduler
 	*retryQuota
-	name          string
 	conf          *balanceLeaderSchedulerConfig
 	handler       http.Handler
 	filters       []filter.Filter
@@ -176,14 +175,11 @@
 // newBalanceLeaderScheduler creates a scheduler that tends to keep leaders on
 // each store balanced.
 func newBalanceLeaderScheduler(opController *operator.Controller, conf *balanceLeaderSchedulerConfig, options ...BalanceLeaderCreateOption) Scheduler {
-	base := NewBaseScheduler(opController)
 	s := &balanceLeaderScheduler{
-		BaseScheduler: base,
+		BaseScheduler: NewBaseScheduler(opController, types.BalanceLeaderScheduler),
 		retryQuota:    newRetryQuota(),
-		name:          BalanceLeaderName,
 		conf:          conf,
 		handler:       newBalanceLeaderHandler(conf),
-		filterCounter: filter.NewCounter(types.BalanceLeaderScheduler.String()),
 	}
 	for _, option := range options {
 		option(s)
@@ -192,6 +188,7 @@ func newBalanceLeaderScheduler(opController *operator.Controller, conf *balanceL
 		&filter.StoreStateFilter{ActionScope: s.GetName(), TransferLeader: true, OperatorLevel: constant.High},
 		filter.NewSpecialUseFilter(s.GetName()),
 	}
+	s.filterCounter = filter.NewCounter(s.GetName())
 	return s
 }
 
@@ -202,13 +199,6 @@ func (l *balanceLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reques
 // BalanceLeaderCreateOption is used to create a scheduler with an option.
 type BalanceLeaderCreateOption func(s *balanceLeaderScheduler)
 
-// WithBalanceLeaderFilterCounterName sets the filter counter name for the scheduler.
-func WithBalanceLeaderFilterCounterName(name string) BalanceLeaderCreateOption {
-	return func(s *balanceLeaderScheduler) {
-		s.filterCounter.SetScope(name)
-	}
-}
-
 // WithBalanceLeaderName sets the name for the scheduler.
 func WithBalanceLeaderName(name string) BalanceLeaderCreateOption {
 	return func(s *balanceLeaderScheduler) {
@@ -216,14 +206,6 @@ func WithBalanceLeaderName(name string) BalanceLeaderCreateOption {
 	}
 }
 
-func (l *balanceLeaderScheduler) GetName() string {
-	return l.name
-}
-
-func (*balanceLeaderScheduler) GetType() string {
-	return BalanceLeaderType
-}
-
 func (l *balanceLeaderScheduler) EncodeConfig() ([]byte, error) {
 	l.conf.RLock()
 	defer l.conf.RUnlock()
@@ -252,7 +234,7 @@ func (l *balanceLeaderScheduler) ReloadConfig() error {
 func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := l.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc()
+		operator.IncOperatorLimitCounter(l.GetType(), operator.OpLeader)
 	}
 	return allowed
 }
diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go
index b26830155b0..3ef01345aea 100644
--- a/pkg/schedule/schedulers/balance_region.go
+++ b/pkg/schedule/schedulers/balance_region.go
@@ -38,7 +38,6 @@ const (
 )
 
 type balanceRegionSchedulerConfig struct {
-	Name   string          `json:"name"`
 	Ranges []core.KeyRange `json:"ranges"`
 	// TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler.
 }
@@ -46,6 +45,7 @@ type balanceRegionSchedulerConfig struct {
 type balanceRegionScheduler struct {
 	*BaseScheduler
 	*retryQuota
+	name          string
 	conf          *balanceRegionSchedulerConfig
 	filters       []filter.Filter
 	filterCounter *filter.Counter
@@ -54,12 +54,11 @@ type balanceRegionScheduler struct {
 // newBalanceRegionScheduler creates a scheduler that tends to keep regions on
 // each store balanced.
 func newBalanceRegionScheduler(opController *operator.Controller, conf *balanceRegionSchedulerConfig, opts ...BalanceRegionCreateOption) Scheduler {
-	base := NewBaseScheduler(opController)
 	scheduler := &balanceRegionScheduler{
-		BaseScheduler: base,
+		BaseScheduler: NewBaseScheduler(opController, types.BalanceRegionScheduler),
 		retryQuota:    newRetryQuota(),
+		name:          types.BalanceRegionScheduler.String(),
 		conf:          conf,
-		filterCounter: filter.NewCounter(types.BalanceRegionScheduler.String()),
 	}
 	for _, setOption := range opts {
 		setOption(scheduler)
@@ -68,6 +67,7 @@ func newBalanceRegionScheduler(opController *operator.Controller, conf *balanceR
 		&filter.StoreStateFilter{ActionScope: scheduler.GetName(), MoveRegion: true, OperatorLevel: constant.Medium},
 		filter.NewSpecialUseFilter(scheduler.GetName()),
 	}
+	scheduler.filterCounter = filter.NewCounter(scheduler.GetName())
 	return scheduler
 }
 
@@ -77,25 +77,10 @@ type BalanceRegionCreateOption func(s *balanceRegionScheduler)
 
 // WithBalanceRegionName sets the name for the scheduler.
 func WithBalanceRegionName(name string) BalanceRegionCreateOption {
 	return func(s *balanceRegionScheduler) {
-		s.conf.Name = name
+		s.name = name
 	}
 }
 
-// WithBalanceRegionFilterCounterName sets the filter counter name for the scheduler.
-func WithBalanceRegionFilterCounterName(name string) BalanceRegionCreateOption {
-	return func(s *balanceRegionScheduler) {
-		s.filterCounter.SetScope(name)
-	}
-}
-
-func (s *balanceRegionScheduler) GetName() string {
-	return s.conf.Name
-}
-
-func (*balanceRegionScheduler) GetType() string {
-	return BalanceRegionType
-}
-
 func (s *balanceRegionScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
@@ -103,7 +88,7 @@ func (s *balanceRegionScheduler) EncodeConfig() ([]byte, error) {
 func (s *balanceRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetSchedulerConfig().GetRegionScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpRegion)
 	}
 	return allowed
 }
diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go
index 1c4daa62634..319a0f2493a 100644
--- a/pkg/schedule/schedulers/balance_witness.go
+++ b/pkg/schedule/schedulers/balance_witness.go
@@ -163,7 +163,6 @@ func (handler *balanceWitnessHandler) ListConfig(w http.ResponseWriter, _ *http.
 type balanceWitnessScheduler struct {
 	*BaseScheduler
 	*retryQuota
-	name          string
 	conf          *balanceWitnessSchedulerConfig
 	handler       http.Handler
 	filters       []filter.Filter
@@ -174,11 +173,9 @@ type balanceWitnessScheduler struct {
 // newBalanceWitnessScheduler creates a scheduler that tends to keep witnesses on
 // each store balanced.
 func newBalanceWitnessScheduler(opController *operator.Controller, conf *balanceWitnessSchedulerConfig, options ...BalanceWitnessCreateOption) Scheduler {
-	base := NewBaseScheduler(opController)
 	s := &balanceWitnessScheduler{
-		BaseScheduler: base,
+		BaseScheduler: NewBaseScheduler(opController, types.BalanceWitnessScheduler),
 		retryQuota:    newRetryQuota(),
-		name:          BalanceWitnessName,
 		conf:          conf,
 		handler:       newBalanceWitnessHandler(conf),
 		counter:       balanceWitnessCounter,
@@ -208,21 +205,6 @@ func WithBalanceWitnessCounter(counter *prometheus.CounterVec) BalanceWitnessCre
 	}
 }
 
-// WithBalanceWitnessName sets the name for the scheduler.
-func WithBalanceWitnessName(name string) BalanceWitnessCreateOption {
-	return func(s *balanceWitnessScheduler) {
-		s.name = name
-	}
-}
-
-func (b *balanceWitnessScheduler) GetName() string {
-	return b.name
-}
-
-func (*balanceWitnessScheduler) GetType() string {
-	return BalanceWitnessType
-}
-
 func (b *balanceWitnessScheduler) EncodeConfig() ([]byte, error) {
 	b.conf.RLock()
 	defer b.conf.RUnlock()
@@ -251,7 +233,7 @@ func (b *balanceWitnessScheduler) ReloadConfig() error {
 func (b *balanceWitnessScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := b.OpController.OperatorCount(operator.OpWitness) < cluster.GetSchedulerConfig().GetWitnessScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(b.GetType(), operator.OpWitness.String()).Inc()
+		operator.IncOperatorLimitCounter(b.GetType(), operator.OpWitness)
 	}
 	return allowed
 }
diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go
index f3772757ad3..6cd02d2b555 100644
--- a/pkg/schedule/schedulers/base_scheduler.go
+++ b/pkg/schedule/schedulers/base_scheduler.go
@@ -23,6 +23,7 @@ import (
 	"github.com/tikv/pd/pkg/errs"
 	sche "github.com/tikv/pd/pkg/schedule/core"
 	"github.com/tikv/pd/pkg/schedule/operator"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/utils/typeutil"
 )
@@ -61,11 +62,14 @@ func intervalGrow(x time.Duration, maxInterval time.Duration, typ intervalGrowth
 // BaseScheduler is a basic scheduler for all other complex scheduler
 type BaseScheduler struct {
 	OpController *operator.Controller
+
+	name string
+	tp   types.CheckerSchedulerType
 }
 
 // NewBaseScheduler returns a basic scheduler
-func NewBaseScheduler(opController *operator.Controller) *BaseScheduler {
-	return &BaseScheduler{OpController: opController}
+func NewBaseScheduler(opController *operator.Controller, tp types.CheckerSchedulerType) *BaseScheduler {
+	return &BaseScheduler{OpController: opController, tp: tp}
 }
 
 func (*BaseScheduler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
@@ -97,3 +101,14 @@ func (*BaseScheduler) PrepareConfig(sche.SchedulerCluster) error { return nil }
 
 // CleanConfig does some cleanup work about config.
 func (*BaseScheduler) CleanConfig(sche.SchedulerCluster) {}
+
+func (s *BaseScheduler) GetName() string {
+	if len(s.name) == 0 {
+		return s.tp.String()
+	}
+	return s.name
+}
+
+func (s *BaseScheduler) GetType() types.CheckerSchedulerType {
+	return s.tp
+}
diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go
index 2adcfbe7e48..3aba9a5d184 100644
--- a/pkg/schedule/schedulers/evict_leader.go
+++ b/pkg/schedule/schedulers/evict_leader.go
@@ -29,6 +29,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/filter"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"github.com/tikv/pd/pkg/utils/apiutil"
 	"github.com/tikv/pd/pkg/utils/syncutil"
@@ -87,7 +88,6 @@ func (conf *evictLeaderSchedulerConfig) Clone() *evictLeaderSchedulerConfig {
 }
 
 func (conf *evictLeaderSchedulerConfig) persistLocked() error {
-	name := conf.getSchedulerName()
 	data, err := EncodeConfig(conf)
 	failpoint.Inject("persistFail", func() {
 		err = errors.New("fail to persist")
@@ -95,11 +95,7 @@ func (conf *evictLeaderSchedulerConfig) persistLocked() error {
 	if err != nil {
 		return err
 	}
-	return conf.storage.SaveSchedulerConfig(name, data)
-}
-
-func (*evictLeaderSchedulerConfig) getSchedulerName() string {
-	return EvictLeaderName
+	return conf.storage.SaveSchedulerConfig(types.EvictLeaderScheduler.String(), data)
 }
 
 func (conf *evictLeaderSchedulerConfig) getRanges(id uint64) []string {
@@ -256,10 +252,9 @@ type evictLeaderScheduler struct {
 // newEvictLeaderScheduler creates an admin scheduler that transfers all leaders
 // out of a store.
 func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeaderSchedulerConfig) Scheduler {
-	base := NewBaseScheduler(opController)
 	handler := newEvictLeaderHandler(conf)
 	return &evictLeaderScheduler{
-		BaseScheduler: base,
+		BaseScheduler: NewBaseScheduler(opController, types.EvictLeaderScheduler),
 		conf:          conf,
 		handler:       handler,
 	}
@@ -274,14 +269,6 @@ func (s *evictLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	s.handler.ServeHTTP(w, r)
 }
 
-func (*evictLeaderScheduler) GetName() string {
-	return EvictLeaderName
-}
-
-func (*evictLeaderScheduler) GetType() string {
-	return EvictLeaderType
-}
-
 func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) {
 	return s.conf.encodeConfig()
 }
@@ -301,14 +288,14 @@ func (s *evictLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) {
 func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader)
 	}
 	return allowed
 }
 
 func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	evictLeaderCounter.Inc()
-	return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf), nil
+	return scheduleEvictLeaderBatch(s.GetName(), cluster, s.conf), nil
 }
 
 func uniqueAppendOperator(dst []*operator.Operator, src ...*operator.Operator) []*operator.Operator {
@@ -332,11 +319,11 @@ type evictLeaderStoresConf interface {
 	getBatch() int
 }
 
-func scheduleEvictLeaderBatch(name, typ string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator {
+func scheduleEvictLeaderBatch(name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator {
 	var ops []*operator.Operator
 	batchSize := conf.getBatch()
 	for i := 0; i < batchSize; i++ {
-		once := scheduleEvictLeaderOnce(name, typ, cluster, conf)
+		once := scheduleEvictLeaderOnce(name, cluster, conf)
 		// no more regions
 		if len(once) == 0 {
 			break
@@ -350,7 +337,7 @@ func scheduleEvictLeaderBatch(name, typ string, cluster sche.SchedulerCluster, c
 	return ops
 }
 
-func scheduleEvictLeaderOnce(name, typ string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator {
+func scheduleEvictLeaderOnce(name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator {
 	stores := conf.getStores()
 	ops := make([]*operator.Operator, 0, len(stores))
 	for _, storeID := range stores {
@@ -395,7 +382,7 @@ func scheduleEvictLeaderOnce(name, typ string, cluster sche.SchedulerCluster, co
 		for _, t := range targets {
 			targetIDs = append(targetIDs, t.GetID())
 		}
-		op, err := operator.CreateTransferLeaderOperator(typ, cluster, region, target.GetID(), targetIDs, operator.OpLeader)
+		op, err := operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), targetIDs, operator.OpLeader)
 		if err != nil {
 			log.Debug("fail to create evict leader operator", errs.ZapError(err))
 			continue
diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go
index c9f10fa610f..721444d1da7 100644
--- a/pkg/schedule/schedulers/evict_slow_store.go
+++ b/pkg/schedule/schedulers/evict_slow_store.go
@@ -26,6 +26,7 @@ import (
 	sche "github.com/tikv/pd/pkg/schedule/core"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"github.com/tikv/pd/pkg/utils/apiutil"
 	"github.com/tikv/pd/pkg/utils/syncutil"
@@ -72,7 +73,6 @@ func (conf *evictSlowStoreSchedulerConfig) Clone() *evictSlowStoreSchedulerConfi
 }
 
 func (conf *evictSlowStoreSchedulerConfig) persistLocked() error {
-	name := EvictSlowStoreName
 	data, err := EncodeConfig(conf)
 	failpoint.Inject("persistFail", func() {
 		err = errors.New("fail to persist")
@@ -80,7 +80,7 @@ func (conf *evictSlowStoreSchedulerConfig) persistLocked() error {
 	if err != nil {
 		return err
 	}
-	return conf.storage.SaveSchedulerConfig(name, data)
+	return conf.storage.SaveSchedulerConfig(types.EvictSlowStoreScheduler.String(), data)
 }
 
 func (conf *evictSlowStoreSchedulerConfig) getStores() []uint64 {
@@ -193,14 +193,6 @@ func (s *evictSlowStoreScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reque
 	s.handler.ServeHTTP(w, r)
 }
 
-func (*evictSlowStoreScheduler) GetName() string {
-	return EvictSlowStoreName
-}
-
-func (*evictSlowStoreScheduler) GetType() string {
-	return EvictSlowStoreType
-}
-
 func (s *evictSlowStoreScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
@@ -267,14 +259,14 @@ func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster sche.SchedulerClust
 }
 
 func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster sche.SchedulerCluster) []*operator.Operator {
-	return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf)
+	return scheduleEvictLeaderBatch(s.GetName(), cluster, s.conf)
 }
 
 func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	if s.conf.evictStore() != 0 {
 		allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
 		if !allowed {
-			operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc()
+			operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader)
 		}
 		return allowed
 	}
@@ -336,7 +328,7 @@ func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, _ bool
 func newEvictSlowStoreScheduler(opController *operator.Controller, conf *evictSlowStoreSchedulerConfig) Scheduler {
 	handler := newEvictSlowStoreHandler(conf)
 	return &evictSlowStoreScheduler{
-		BaseScheduler: NewBaseScheduler(opController),
+		BaseScheduler: NewBaseScheduler(opController, types.EvictSlowStoreScheduler),
 		conf:          conf,
 		handler:       handler,
 	}
diff --git a/pkg/schedule/schedulers/evict_slow_store_test.go b/pkg/schedule/schedulers/evict_slow_store_test.go
index 6ed9764ba7c..440ab85d08e 100644
--- a/pkg/schedule/schedulers/evict_slow_store_test.go
+++ b/pkg/schedule/schedulers/evict_slow_store_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/tikv/pd/pkg/core"
 	"github.com/tikv/pd/pkg/mock/mockcluster"
 	"github.com/tikv/pd/pkg/schedule/operator"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/storage"
 	"github.com/tikv/pd/pkg/utils/operatorutil"
 )
@@ -79,7 +80,7 @@ func (suite *evictSlowStoreTestSuite) TestEvictSlowStore() {
 	// Add evict leader scheduler to store 1
 	ops, _ := suite.es.Schedule(suite.tc, false)
 	operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2})
-	re.Equal(EvictSlowStoreType, ops[0].Desc())
+	re.Equal(types.EvictSlowStoreScheduler.String(), ops[0].Desc())
 	// Cannot balance leaders to store 1
 	ops, _ = suite.bs.Schedule(suite.tc, false)
 	re.Empty(ops)
diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go
index dc2266b5540..d14cec1e06a 100644
--- a/pkg/schedule/schedulers/evict_slow_trend.go
+++ b/pkg/schedule/schedulers/evict_slow_trend.go
@@ -27,6 +27,7 @@ import (
 	sche "github.com/tikv/pd/pkg/schedule/core"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"github.com/tikv/pd/pkg/utils/apiutil"
 	"github.com/tikv/pd/pkg/utils/syncutil"
@@ -86,7 +87,6 @@ func (conf *evictSlowTrendSchedulerConfig) Clone() *evictSlowTrendSchedulerConfi
 }
 
 func (conf *evictSlowTrendSchedulerConfig) persistLocked() error {
-	name := EvictSlowTrendName
 	data, err := EncodeConfig(conf)
 	failpoint.Inject("persistFail", func() {
 		err = errors.New("fail to persist")
@@ -94,7 +94,7 @@ func (conf *evictSlowTrendSchedulerConfig) persistLocked() error {
 	if err != nil {
 		return err
 	}
-	return conf.storage.SaveSchedulerConfig(name, data)
+	return conf.storage.SaveSchedulerConfig(types.EvictSlowTrendScheduler.String(), data)
 }
 
 func (conf *evictSlowTrendSchedulerConfig) getStores() []uint64 {
@@ -295,14 +295,6 @@ func (s *evictSlowTrendScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reque
 	s.handler.ServeHTTP(w, r)
 }
 
-func (*evictSlowTrendScheduler) GetName() string {
-	return EvictSlowTrendName
-}
-
-func (*evictSlowTrendScheduler) GetType() string {
-	return EvictSlowTrendType
-}
-
 func (s *evictSlowTrendScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
@@ -374,7 +366,7 @@ func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.SchedulerClus
 		return nil
 	}
 	storeSlowTrendEvictedStatusGauge.WithLabelValues(store.GetAddress(), strconv.FormatUint(store.GetID(), 10)).Set(1)
-	return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf)
+	return scheduleEvictLeaderBatch(s.GetName(), cluster, s.conf)
 }
 
 func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
@@ -383,7 +375,7 @@ func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste
 	}
 	allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader)
 	}
 	return allowed
 }
@@ -459,7 +451,7 @@ func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, _ bool
 func newEvictSlowTrendScheduler(opController *operator.Controller, conf *evictSlowTrendSchedulerConfig) Scheduler {
 	handler := newEvictSlowTrendHandler(conf)
 	return &evictSlowTrendScheduler{
-		BaseScheduler: NewBaseScheduler(opController),
+		BaseScheduler: NewBaseScheduler(opController, types.EvictSlowTrendScheduler),
 		conf:          conf,
 		handler:       handler,
 	}
diff --git a/pkg/schedule/schedulers/evict_slow_trend_test.go b/pkg/schedule/schedulers/evict_slow_trend_test.go
index dd6807f4a85..c01ae4959ba 100644
--- a/pkg/schedule/schedulers/evict_slow_trend_test.go
+++ b/pkg/schedule/schedulers/evict_slow_trend_test.go
@@ -27,6 +27,7 @@ import (
 	"github.com/tikv/pd/pkg/core"
 	"github.com/tikv/pd/pkg/mock/mockcluster"
 	"github.com/tikv/pd/pkg/schedule/operator"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/storage"
 	"github.com/tikv/pd/pkg/utils/operatorutil"
 )
@@ -155,7 +156,7 @@ func (suite *evictSlowTrendTestSuite) TestEvictSlowTrend() {
 	}
 	ops, _ = suite.es.Schedule(suite.tc, false)
 	operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2, 3})
-	re.Equal(EvictSlowTrendType, ops[0].Desc())
+	re.Equal(types.EvictSlowTrendScheduler.String(), ops[0].Desc())
 	re.Zero(es2.conf.candidate())
 	re.Equal(uint64(1), es2.conf.evictedStore())
 	// Cannot balance leaders to store 1
diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go
index a19a4e1bf4b..4289effd7bd 100644
--- a/pkg/schedule/schedulers/grant_hot_region.go
+++ b/pkg/schedule/schedulers/grant_hot_region.go
@@ -30,6 +30,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/filter"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/slice"
 	"github.com/tikv/pd/pkg/statistics"
 	"github.com/tikv/pd/pkg/statistics/utils"
@@ -92,18 +93,13 @@ func (conf *grantHotRegionSchedulerConfig) Clone() *grantHotRegionSchedulerConfi
 }
 
 func (conf *grantHotRegionSchedulerConfig) Persist() error {
-	name := conf.getSchedulerName()
 	conf.RLock()
 	defer conf.RUnlock()
 	data, err := EncodeConfig(conf)
 	if err != nil {
 		return err
 	}
-	return conf.storage.SaveSchedulerConfig(name, data)
-}
-
-func (*grantHotRegionSchedulerConfig) getSchedulerName() string {
-	return GrantHotRegionName
+	return conf.storage.SaveSchedulerConfig(types.GrantHotRegionScheduler.String(), data)
 }
 
 func (conf *grantHotRegionSchedulerConfig) has(storeID uint64) bool {
@@ -133,6 +129,7 @@ type grantHotRegionScheduler struct {
 
 func newGrantHotRegionScheduler(opController *operator.Controller, conf *grantHotRegionSchedulerConfig) *grantHotRegionScheduler {
 	base := newBaseHotScheduler(opController, statistics.DefaultHistorySampleDuration, statistics.DefaultHistorySampleInterval)
+	base.tp = types.GrantHotRegionScheduler
 	handler := newGrantHotRegionHandler(conf)
 	ret := &grantHotRegionScheduler{
 		baseHotScheduler: base,
@@ -142,14 +139,6 @@ func newGrantHotRegionScheduler(opController *operator.Controller, conf *grantHo
 	return ret
 }
 
-func (*grantHotRegionScheduler) GetName() string {
-	return GrantHotRegionName
-}
-
-func (*grantHotRegionScheduler) GetType() string {
-	return GrantHotRegionType
-}
-
 func (s *grantHotRegionScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
@@ -180,10 +169,10 @@ func (s *grantHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste
 	regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < conf.GetRegionScheduleLimit()
 	leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < conf.GetLeaderScheduleLimit()
 	if !regionAllowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpRegion)
 	}
 	if !leaderAllowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader)
 	}
 	return regionAllowed && leaderAllowed
 }
diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go
index 21900fac85d..41e6debaafa 100644
--- a/pkg/schedule/schedulers/grant_leader.go
+++ b/pkg/schedule/schedulers/grant_leader.go
@@ -28,6 +28,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/filter"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"github.com/tikv/pd/pkg/utils/apiutil"
 	"github.com/tikv/pd/pkg/utils/syncutil"
@@ -82,18 +83,13 @@ func (conf *grantLeaderSchedulerConfig) Clone() *grantLeaderSchedulerConfig {
 }
 
 func (conf *grantLeaderSchedulerConfig) Persist() error {
-	name := conf.getSchedulerName()
 	conf.RLock()
 	defer conf.RUnlock()
 	data, err := EncodeConfig(conf)
 	if err != nil {
 		return err
 	}
-	return conf.storage.SaveSchedulerConfig(name, data)
-}
-
-func (*grantLeaderSchedulerConfig) getSchedulerName() string {
-	return GrantLeaderName
+	return conf.storage.SaveSchedulerConfig(types.GrantLeaderScheduler.String(), data)
 }
 
 func (conf *grantLeaderSchedulerConfig) getRanges(id uint64) []string {
@@ -159,7 +155,7 @@ type grantLeaderScheduler struct {
 // newGrantLeaderScheduler creates an admin scheduler that transfers all leaders
 // to a store.
 func newGrantLeaderScheduler(opController *operator.Controller, conf *grantLeaderSchedulerConfig) Scheduler {
-	base := NewBaseScheduler(opController)
+	base := NewBaseScheduler(opController, types.GrantLeaderScheduler)
 	handler := newGrantLeaderHandler(conf)
 	return &grantLeaderScheduler{
 		BaseScheduler: base,
@@ -172,14 +168,6 @@ func (s *grantLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	s.handler.ServeHTTP(w, r)
 }
 
-func (*grantLeaderScheduler) GetName() string {
-	return GrantLeaderName
-}
-
-func (*grantLeaderScheduler) GetType() string {
-	return GrantLeaderType
-}
-
 func (s *grantLeaderScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
@@ -226,7 +214,7 @@ func (s *grantLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) {
 func (s *grantLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader)
 	}
 	return allowed
 }
diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go
index f79d8fac760..fe9b3964139 100644
--- a/pkg/schedule/schedulers/hot_region.go
+++ b/pkg/schedule/schedulers/hot_region.go
@@ -34,6 +34,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/filter"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/slice"
 	"github.com/tikv/pd/pkg/statistics"
 	"github.com/tikv/pd/pkg/statistics/buckets"
@@ -92,7 +93,7 @@ type baseHotScheduler struct {
 }
 
 func newBaseHotScheduler(opController *operator.Controller, sampleDuration time.Duration, sampleInterval time.Duration) *baseHotScheduler {
-	base := NewBaseScheduler(opController)
+	base := NewBaseScheduler(opController, types.BalanceHotRegionScheduler)
 	ret := &baseHotScheduler{
 		BaseScheduler:  base,
 		regionPendings: make(map[uint64]*pendingInfluence),
@@ -214,14 +215,6 @@ func newHotScheduler(opController *operator.Controller, conf *hotRegionScheduler
 	return ret
 }
 
-func (h *hotScheduler) GetName() string {
-	return h.name
-}
-
-func (*hotScheduler) GetType() string {
-	return HotRegionType
-}
-
 func (h *hotScheduler) EncodeConfig() ([]byte, error) {
 	return h.conf.EncodeConfig()
 }
@@ -281,7 +274,7 @@ func (h *hotScheduler) GetNextInterval(time.Duration) time.Duration {
 func (h *hotScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := h.OpController.OperatorCount(operator.OpHotRegion) < cluster.GetSchedulerConfig().GetHotRegionScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(h.GetType(), operator.OpHotRegion.String()).Inc()
+		operator.IncOperatorLimitCounter(h.GetType(), operator.OpHotRegion)
 	}
 	return allowed
 }
diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go
index 777c8b3d625..988bbc30475 100644
--- a/pkg/schedule/schedulers/init.go
+++ b/pkg/schedule/schedulers/init.go
@@ -75,7 +75,6 @@ func schedulersRegister() {
 				return err
 			}
 			conf.Ranges = ranges
-			conf.Name = BalanceRegionName
 			return nil
 		}
 	})
@@ -282,7 +281,6 @@ func schedulersRegister() {
 				return err
 			}
 			conf.Ranges = ranges
-			conf.Name = LabelName
 			return nil
 		}
 	})
@@ -307,7 +305,6 @@ func schedulersRegister() {
 				return err
 			}
 			conf.Ranges = ranges
-			conf.Name = RandomMergeName
 			return nil
 		}
 	})
@@ -370,7 +367,6 @@ func schedulersRegister() {
 				}
 				conf.Limit = limit
 			}
-			conf.Name = ShuffleHotRegionName
 			return nil
 		}
 	})
diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go
index 6b7a98f8d02..814f525a76c 100644
--- a/pkg/schedule/schedulers/label.go
+++ b/pkg/schedule/schedulers/label.go
@@ -24,6 +24,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/filter"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"go.uber.org/zap"
 )
@@ -35,7 +36,6 @@ const (
 )
 
 type labelSchedulerConfig struct {
-	Name   string          `json:"name"`
 	Ranges []core.KeyRange `json:"ranges"`
 	// TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler.
 }
@@ -50,19 +50,11 @@ type labelScheduler struct {
 // the store with the specific label.
 func newLabelScheduler(opController *operator.Controller, conf *labelSchedulerConfig) Scheduler {
 	return &labelScheduler{
-		BaseScheduler: NewBaseScheduler(opController),
+		BaseScheduler: NewBaseScheduler(opController, types.LabelScheduler),
 		conf:          conf,
 	}
 }
 
-func (s *labelScheduler) GetName() string {
-	return s.conf.Name
-}
-
-func (*labelScheduler) GetType() string {
-	return LabelType
-}
-
 func (s *labelScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
@@ -70,7 +62,7 @@ func (s *labelScheduler) EncodeConfig() ([]byte, error) {
 func (s *labelScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader)
 	}
 	return allowed
 }
diff --git a/pkg/schedule/schedulers/metrics.go b/pkg/schedule/schedulers/metrics.go
index f8bd2b4d686..42170e43818 100644
--- a/pkg/schedule/schedulers/metrics.go
+++ b/pkg/schedule/schedulers/metrics.go
@@ -186,7 +186,7 @@ func grantLeaderCounterWithEvent(event string) prometheus.Counter {
 }
 
 func hotRegionCounterWithEvent(event string) prometheus.Counter {
-	return schedulerCounter.WithLabelValues(types.HotRegionScheduler.String(), event)
+	return schedulerCounter.WithLabelValues(types.BalanceHotRegionScheduler.String(), event)
 }
 
 func labelCounterWithEvent(event string) prometheus.Counter {
diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go
index ff96afe03eb..2d425746cea 100644
--- a/pkg/schedule/schedulers/random_merge.go
+++ b/pkg/schedule/schedulers/random_merge.go
@@ -26,6 +26,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/filter"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 )
@@ -36,7 +37,6 @@ const (
 )
 
 type randomMergeSchedulerConfig struct {
-	Name   string          `json:"name"`
 	Ranges []core.KeyRange `json:"ranges"`
 	// TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler.
 }
@@ -49,21 +49,13 @@ type randomMergeScheduler struct {
 
 // newRandomMergeScheduler creates an admin scheduler that randomly picks two adjacent regions
 // then merges them.
 func newRandomMergeScheduler(opController *operator.Controller, conf *randomMergeSchedulerConfig) Scheduler {
-	base := NewBaseScheduler(opController)
+	base := NewBaseScheduler(opController, types.RandomMergeScheduler)
 	return &randomMergeScheduler{
 		BaseScheduler: base,
 		conf:          conf,
 	}
 }
 
-func (s *randomMergeScheduler) GetName() string {
-	return s.conf.Name
-}
-
-func (*randomMergeScheduler) GetType() string {
-	return RandomMergeType
-}
-
 func (s *randomMergeScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
@@ -71,7 +63,7 @@ func (s *randomMergeScheduler) EncodeConfig() ([]byte, error) {
 func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := s.OpController.OperatorCount(operator.OpMerge) < cluster.GetSchedulerConfig().GetMergeScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpMerge.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpMerge)
 	}
 	return allowed
 }
@@ -80,7 +72,7 @@ func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) (
 	randomMergeCounter.Inc()
 
 	store := filter.NewCandidates(cluster.GetStores()).
-		FilterSource(cluster.GetSchedulerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: s.conf.Name, MoveRegion: true, OperatorLevel: constant.Low}).
+		FilterSource(cluster.GetSchedulerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true, OperatorLevel: constant.Low}).
 		RandomPick()
 	if store == nil {
 		randomMergeNoSourceStoreCounter.Inc()
diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go
index 17c67a154ab..8874eb19cff 100644
--- a/pkg/schedule/schedulers/scatter_range.go
+++ b/pkg/schedule/schedulers/scatter_range.go
@@ -25,6 +25,7 @@ import (
 	sche "github.com/tikv/pd/pkg/schedule/core"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"github.com/tikv/pd/pkg/utils/apiutil"
 	"github.com/tikv/pd/pkg/utils/syncutil"
@@ -106,7 +107,6 @@ func (conf *scatterRangeSchedulerConfig) getSchedulerName() string {
 
 type scatterRangeScheduler struct {
 	*BaseScheduler
-	name          string
 	config        *scatterRangeSchedulerConfig
 	balanceLeader Scheduler
 	balanceRegion Scheduler
@@ -115,28 +115,27 @@ type scatterRangeScheduler struct {
 
 // newScatterRangeScheduler creates a scheduler that balances the distribution of leaders and regions that in the specified key range.
 func newScatterRangeScheduler(opController *operator.Controller, config *scatterRangeSchedulerConfig) Scheduler {
-	base := NewBaseScheduler(opController)
+	base := NewBaseScheduler(opController, types.ScatterRangeScheduler)
 
-	name := config.getSchedulerName()
 	handler := newScatterRangeHandler(config)
 	scheduler := &scatterRangeScheduler{
 		BaseScheduler: base,
 		config:        config,
 		handler:       handler,
-		name:          name,
 		balanceLeader: newBalanceLeaderScheduler(
 			opController,
 			&balanceLeaderSchedulerConfig{Ranges: []core.KeyRange{core.NewKeyRange("", "")}},
+			// the name will not be persisted
 			WithBalanceLeaderName("scatter-range-leader"),
-			WithBalanceLeaderFilterCounterName("scatter-range-leader"),
 		),
 		balanceRegion: newBalanceRegionScheduler(
 			opController,
 			&balanceRegionSchedulerConfig{Ranges: []core.KeyRange{core.NewKeyRange("", "")}},
+			// the name will not be persisted
 			WithBalanceRegionName("scatter-range-region"),
-			WithBalanceRegionFilterCounterName("scatter-range-region"),
 		),
 	}
+	scheduler.name = config.getSchedulerName()
 	return scheduler
 }
 
@@ -144,14 +143,6 @@ func (l *scatterRangeScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request
 	l.handler.ServeHTTP(w, r)
 }
 
-func (l *scatterRangeScheduler) GetName() string {
-	return l.name
-}
-
-func (*scatterRangeScheduler) GetType() string {
-	return ScatterRangeType
-}
-
 func (l *scatterRangeScheduler) EncodeConfig() ([]byte, error) {
 	l.config.RLock()
 	defer l.config.RUnlock()
@@ -185,7 +176,7 @@ func (l *scatterRangeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster)
 func (l *scatterRangeScheduler) allowBalanceLeader(cluster sche.SchedulerCluster) bool {
 	allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpLeader.String()).Inc()
+		operator.IncOperatorLimitCounter(l.GetType(), operator.OpLeader)
 	}
 	return allowed
 }
@@ -193,7 +184,7 @@ func (l *scatterRangeScheduler) allowBalanceLeader(cluster sche.SchedulerCluster
 func (l *scatterRangeScheduler) allowBalanceRegion(cluster sche.SchedulerCluster) bool {
 	allowed := l.OpController.OperatorCount(operator.OpRange) < cluster.GetSchedulerConfig().GetRegionScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(l.GetType(), operator.OpRegion.String()).Inc()
+		operator.IncOperatorLimitCounter(l.GetType(), operator.OpRegion)
 	}
 	return allowed
 }
diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go
index abace59a266..894544d9617 100644
--- a/pkg/schedule/schedulers/scheduler.go
+++ b/pkg/schedule/schedulers/scheduler.go
@@ -27,6 +27,7 @@ import (
 	sche "github.com/tikv/pd/pkg/schedule/core"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"go.uber.org/zap"
 )
@@ -36,7 +37,7 @@ type Scheduler interface {
 	http.Handler
 	GetName() string
 	// GetType should in accordance with the name passing to RegisterScheduler()
-	GetType() string
+	GetType() types.CheckerSchedulerType
 	EncodeConfig() ([]byte, error)
 	// ReloadConfig reloads the config from the storage.
 	ReloadConfig() error
diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go
index f4b566c56a4..32384a19df1 100644
--- a/pkg/schedule/schedulers/shuffle_hot_region.go
+++ b/pkg/schedule/schedulers/shuffle_hot_region.go
@@ -26,6 +26,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/filter"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/statistics"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"github.com/tikv/pd/pkg/utils/apiutil"
@@ -44,30 +45,23 @@ const (
 type shuffleHotRegionSchedulerConfig struct {
 	syncutil.RWMutex
 	storage endpoint.ConfigStorage
-	Name    string `json:"name"`
 	Limit   uint64 `json:"limit"`
 }
 
-func (conf *shuffleHotRegionSchedulerConfig) getSchedulerName() string {
-	return conf.Name
-}
-
 func (conf *shuffleHotRegionSchedulerConfig) Clone() *shuffleHotRegionSchedulerConfig {
 	conf.RLock()
 	defer conf.RUnlock()
 	return &shuffleHotRegionSchedulerConfig{
-		Name:  conf.Name,
 		Limit: conf.Limit,
 	}
 }
 
 func (conf *shuffleHotRegionSchedulerConfig) persistLocked() error {
-	name := conf.getSchedulerName()
 	data, err := EncodeConfig(conf)
 	if err != nil {
 		return err
 	}
-	return conf.storage.SaveSchedulerConfig(name, data)
+	return conf.storage.SaveSchedulerConfig(types.ShuffleHotRegionScheduler.String(), data)
 }
 
 func (conf *shuffleHotRegionSchedulerConfig) getLimit() uint64 {
@@ -90,6 +84,7 @@ type shuffleHotRegionScheduler struct {
 
 func newShuffleHotRegionScheduler(opController *operator.Controller, conf *shuffleHotRegionSchedulerConfig) Scheduler {
 	base := newBaseHotScheduler(opController, statistics.DefaultHistorySampleDuration, statistics.DefaultHistorySampleInterval)
+	base.tp = types.ShuffleHotRegionScheduler
 	handler := newShuffleHotRegionHandler(conf)
 	ret := &shuffleHotRegionScheduler{
 		baseHotScheduler: base,
@@ -103,14 +98,6 @@ func (s *shuffleHotRegionScheduler) ServeHTTP(w http.ResponseWriter, r *http.Req
 	s.handler.ServeHTTP(w, r)
 }
 
-func (s *shuffleHotRegionScheduler) GetName() string {
-	return s.conf.Name
-}
-
-func (*shuffleHotRegionScheduler) GetType() string {
-	return ShuffleHotRegionType
-}
-
 func (s *shuffleHotRegionScheduler) EncodeConfig() ([]byte, error) {
 	return EncodeConfig(s.conf)
 }
@@ -139,13 +126,13 @@ func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerClus
 	regionAllowed := s.OpController.OperatorCount(operator.OpRegion) < conf.GetRegionScheduleLimit()
 	leaderAllowed := s.OpController.OperatorCount(operator.OpLeader) < conf.GetLeaderScheduleLimit()
 	if !hotRegionAllowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpHotRegion.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpHotRegion)
 	}
 	if !regionAllowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpRegion)
 	}
 	if !leaderAllowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader)
 	}
 	return hotRegionAllowed && regionAllowed && leaderAllowed
 }
diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go
index 17b5fae6448..ce2c8cd31d5 100644
--- a/pkg/schedule/schedulers/shuffle_leader.go
+++ b/pkg/schedule/schedulers/shuffle_leader.go
@@ -23,6 +23,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/filter"
 	"github.com/tikv/pd/pkg/schedule/operator"
"github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" ) const ( @@ -51,7 +52,7 @@ func newShuffleLeaderScheduler(opController *operator.Controller, conf *shuffleL &filter.StoreStateFilter{ActionScope: conf.Name, TransferLeader: true, OperatorLevel: constant.Low}, filter.NewSpecialUseFilter(conf.Name), } - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.ShuffleLeaderScheduler) return &shuffleLeaderScheduler{ BaseScheduler: base, conf: conf, @@ -59,14 +60,6 @@ func newShuffleLeaderScheduler(opController *operator.Controller, conf *shuffleL } } -func (s *shuffleLeaderScheduler) GetName() string { - return s.conf.Name -} - -func (*shuffleLeaderScheduler) GetType() string { - return ShuffleLeaderType -} - func (s *shuffleLeaderScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(s.conf) } @@ -74,7 +67,7 @@ func (s *shuffleLeaderScheduler) EncodeConfig() ([]byte, error) { func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader) } return allowed } diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index 57f6c618962..b59e97b2a11 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -24,6 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" ) const ( @@ -46,7 +47,7 @@ func newShuffleRegionScheduler(opController *operator.Controller, conf *shuffleR &filter.StoreStateFilter{ActionScope: ShuffleRegionName, MoveRegion: true, OperatorLevel: constant.Low}, filter.NewSpecialUseFilter(ShuffleRegionName), } - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.ShuffleRegionScheduler) return &shuffleRegionScheduler{ BaseScheduler: base, conf: conf, @@ -58,14 +59,6 @@ func (s *shuffleRegionScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reques s.conf.ServeHTTP(w, r) } -func (*shuffleRegionScheduler) GetName() string { - return ShuffleRegionName -} - -func (*shuffleRegionScheduler) GetType() string { - return ShuffleRegionType -} - func (s *shuffleRegionScheduler) EncodeConfig() ([]byte, error) { return s.conf.EncodeConfig() } @@ -92,7 +85,7 @@ func (s *shuffleRegionScheduler) ReloadConfig() error { func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { allowed := s.OpController.OperatorCount(operator.OpRegion) < cluster.GetSchedulerConfig().GetRegionScheduleLimit() if !allowed { - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpRegion.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpRegion) } return allowed } diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 7df3ee8f552..2031e232aee 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -28,6 +28,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" 
"github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/reflectutil" @@ -154,7 +155,7 @@ func newSplitBucketHandler(conf *splitBucketSchedulerConfig) http.Handler { } func newSplitBucketScheduler(opController *operator.Controller, conf *splitBucketSchedulerConfig) *splitBucketScheduler { - base := NewBaseScheduler(opController) + base := NewBaseScheduler(opController, types.SplitBucketScheduler) handler := newSplitBucketHandler(conf) ret := &splitBucketScheduler{ BaseScheduler: base, @@ -164,16 +165,6 @@ func newSplitBucketScheduler(opController *operator.Controller, conf *splitBucke return ret } -// GetName returns the name of the split bucket scheduler. -func (*splitBucketScheduler) GetName() string { - return SplitBucketName -} - -// GetType returns the type of the split bucket scheduler. -func (*splitBucketScheduler) GetType() string { - return SplitBucketType -} - func (s *splitBucketScheduler) ReloadConfig() error { s.conf.Lock() defer s.conf.Unlock() @@ -207,7 +198,7 @@ func (s *splitBucketScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) allowed := s.BaseScheduler.OpController.OperatorCount(operator.OpSplit) < s.conf.getSplitLimit() if !allowed { splitBuckerSplitLimitCounter.Inc() - operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpSplit.String()).Inc() + operator.IncOperatorLimitCounter(s.GetType(), operator.OpSplit) } return allowed } diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index 2050194b9ae..8b6e9c39f1d 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -24,6 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + types "github.com/tikv/pd/pkg/schedule/type" ) const ( @@ -48,35 +49,27 @@ type transferWitnessLeaderScheduler struct { // newTransferWitnessLeaderScheduler creates an admin scheduler that transfers witness leader of a region. 
diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go
index 2050194b9ae..8b6e9c39f1d 100644
--- a/pkg/schedule/schedulers/transfer_witness_leader.go
+++ b/pkg/schedule/schedulers/transfer_witness_leader.go
@@ -24,6 +24,7 @@ import (
 	"github.com/tikv/pd/pkg/schedule/filter"
 	"github.com/tikv/pd/pkg/schedule/operator"
 	"github.com/tikv/pd/pkg/schedule/plan"
+	types "github.com/tikv/pd/pkg/schedule/type"
 )
 
 const (
@@ -48,35 +49,27 @@ type transferWitnessLeaderScheduler struct {
 // newTransferWitnessLeaderScheduler creates an admin scheduler that transfers witness leader of a region.
 func newTransferWitnessLeaderScheduler(opController *operator.Controller) Scheduler {
 	return &transferWitnessLeaderScheduler{
-		BaseScheduler: NewBaseScheduler(opController),
+		BaseScheduler: NewBaseScheduler(opController, types.TransferWitnessLeaderScheduler),
 		regions:       make(chan *core.RegionInfo, transferWitnessLeaderRecvMaxRegionSize),
 	}
 }
 
-func (*transferWitnessLeaderScheduler) GetName() string {
-	return TransferWitnessLeaderName
-}
-
-func (*transferWitnessLeaderScheduler) GetType() string {
-	return TransferWitnessLeaderType
-}
-
 func (*transferWitnessLeaderScheduler) IsScheduleAllowed(sche.SchedulerCluster) bool {
 	return true
 }
 
 func (s *transferWitnessLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	transferWitnessLeaderCounter.Inc()
-	return s.scheduleTransferWitnessLeaderBatch(s.GetName(), s.GetType(), cluster, transferWitnessLeaderBatchSize), nil
+	return s.scheduleTransferWitnessLeaderBatch(s.GetName(), cluster, transferWitnessLeaderBatchSize), nil
 }
 
-func (s *transferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name, typ string, cluster sche.SchedulerCluster, batchSize int) []*operator.Operator {
+func (s *transferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name string, cluster sche.SchedulerCluster, batchSize int) []*operator.Operator {
 	var ops []*operator.Operator
 batchLoop:
 	for i := 0; i < batchSize; i++ {
 		select {
 		case region := <-s.regions:
-			op, err := scheduleTransferWitnessLeader(name, typ, cluster, region)
+			op, err := scheduleTransferWitnessLeader(name, cluster, region)
 			if err != nil {
 				log.Debug("fail to create transfer leader operator", errs.ZapError(err))
 				continue
@@ -93,7 +86,7 @@ batchLoop:
 	return ops
 }
 
-func scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerCluster, region *core.RegionInfo) (*operator.Operator, error) {
+func scheduleTransferWitnessLeader(name string, cluster sche.SchedulerCluster, region *core.RegionInfo) (*operator.Operator, error) {
 	var filters []filter.Filter
 	unhealthyPeerStores := make(map[uint64]struct{})
 	for _, peer := range region.GetDownPeers() {
@@ -102,7 +95,8 @@ func scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerClust
 	for _, peer := range region.GetPendingPeers() {
 		unhealthyPeerStores[peer.GetStoreId()] = struct{}{}
 	}
-	filters = append(filters, filter.NewExcludedFilter(name, nil, unhealthyPeerStores), &filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent})
+	filters = append(filters, filter.NewExcludedFilter(name, nil, unhealthyPeerStores),
+		&filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent})
 	candidates := filter.NewCandidates(cluster.GetFollowerStores(region)).FilterTarget(cluster.GetSchedulerConfig(), nil, nil, filters...)
 	// Compatible with old TiKV transfer leader logic.
 	target := candidates.RandomPick()
@@ -116,7 +110,7 @@ func scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerClust
 	for _, t := range targets {
 		targetIDs = append(targetIDs, t.GetID())
 	}
-	return operator.CreateTransferLeaderOperator(typ, cluster, region, target.GetID(), targetIDs, operator.OpWitnessLeader)
+	return operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), targetIDs, operator.OpWitnessLeader)
 }
 
 // RecvRegionInfo receives a checked region from coordinator
diff --git a/pkg/schedule/type/type.go b/pkg/schedule/type/type.go
index 16910c631fd..1f6211a9783 100644
--- a/pkg/schedule/type/type.go
+++ b/pkg/schedule/type/type.go
@@ -52,8 +52,8 @@ const (
 	GrantLeaderScheduler CheckerSchedulerType = "grant-leader-scheduler"
 	// GrantHotRegionScheduler is grant hot region scheduler name.
 	GrantHotRegionScheduler CheckerSchedulerType = "grant-hot-region-scheduler"
-	// HotRegionScheduler is balance hot region scheduler name.
-	HotRegionScheduler CheckerSchedulerType = "balance-hot-region-scheduler"
+	// BalanceHotRegionScheduler is balance hot region scheduler name.
+	BalanceHotRegionScheduler CheckerSchedulerType = "balance-hot-region-scheduler"
 	// RandomMergeScheduler is random merge scheduler name.
 	RandomMergeScheduler CheckerSchedulerType = "random-merge-scheduler"
 	// ScatterRangeScheduler is scatter range scheduler name.
@@ -73,8 +73,10 @@ const (
 	LabelScheduler CheckerSchedulerType = "label-scheduler"
 )
 
-// SchedulerTypeCompatibleMap temporarily exists for compatibility.
-// TODO: remove it after all components use CheckerSchedulerType.
+// SchedulerTypeCompatibleMap exists for compatibility.
+//
+// It is used in `PersistOptions` and `PersistConfig`. These two structs
+// are persisted in storage, so the legacy type strings must stay compatible.
 var SchedulerTypeCompatibleMap = map[CheckerSchedulerType]string{
 	BalanceLeaderScheduler:     "balance-leader",
 	BalanceRegionScheduler:     "balance-region",
@@ -84,7 +86,7 @@ var SchedulerTypeCompatibleMap = map[CheckerSchedulerType]string{
 	EvictSlowTrendScheduler:    "evict-slow-trend",
 	GrantLeaderScheduler:       "grant-leader",
 	GrantHotRegionScheduler:    "grant-hot-region",
-	HotRegionScheduler:         "hot-region",
+	BalanceHotRegionScheduler:  "hot-region",
 	RandomMergeScheduler:       "random-merge",
 	ScatterRangeScheduler:      "scatter-range",
 	ShuffleHotRegionScheduler:  "shuffle-hot-region",
@@ -105,7 +107,7 @@ var SchedulerStr2Type = map[string]CheckerSchedulerType{
 	"evict-slow-trend-scheduler":   EvictSlowTrendScheduler,
 	"grant-leader-scheduler":       GrantLeaderScheduler,
 	"grant-hot-region-scheduler":   GrantHotRegionScheduler,
-	"balance-hot-region-scheduler": HotRegionScheduler,
+	"balance-hot-region-scheduler": BalanceHotRegionScheduler,
 	"random-merge-scheduler":       RandomMergeScheduler,
 	// TODO: update to `scatter-range-scheduler`
 	"scatter-range":                ScatterRangeScheduler,
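To make the rename concrete, here is a small, self-contained usage example (runnable inside the pd module) showing how a typed constant round-trips through the two maps above:

package main

import (
	"fmt"

	types "github.com/tikv/pd/pkg/schedule/type"
)

func main() {
	tp := types.BalanceHotRegionScheduler // "balance-hot-region-scheduler"

	// Persisted ScheduleConfig entries keep the legacy type string.
	fmt.Println(types.SchedulerTypeCompatibleMap[tp]) // hot-region

	// API-facing scheduler names map back to the typed constant.
	fmt.Println(types.SchedulerStr2Type["balance-hot-region-scheduler"] == tp) // true
}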
 	EvictLeaderType = "user-evict-leader"
 	noStoreInSchedulerInfo = "No store in user-evict-leader-scheduler-config"
+
+	UserEvictLeaderScheduler types.CheckerSchedulerType = "user-evict-leader-scheduler"
 )
 
 func init() {
@@ -127,18 +130,13 @@ func (conf *evictLeaderSchedulerConfig) Clone() *evictLeaderSchedulerConfig {
 
 // Persist saves the config.
 func (conf *evictLeaderSchedulerConfig) Persist() error {
-	name := conf.getScheduleName()
 	conf.mu.RLock()
 	defer conf.mu.RUnlock()
 	data, err := schedulers.EncodeConfig(conf)
 	if err != nil {
 		return err
 	}
-	return conf.storage.SaveSchedulerConfig(name, data)
-}
-
-func (*evictLeaderSchedulerConfig) getScheduleName() string {
-	return EvictLeaderName
+	return conf.storage.SaveSchedulerConfig(EvictLeaderName, data)
 }
 
 func (conf *evictLeaderSchedulerConfig) getRanges(id uint64) []string {
@@ -160,7 +158,7 @@ type evictLeaderScheduler struct {
 // newEvictLeaderScheduler creates an admin scheduler that transfers all leaders
 // out of a store.
 func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeaderSchedulerConfig) schedulers.Scheduler {
-	base := schedulers.NewBaseScheduler(opController)
+	base := schedulers.NewBaseScheduler(opController, UserEvictLeaderScheduler)
 	handler := newEvictLeaderHandler(conf)
 	return &evictLeaderScheduler{
 		BaseScheduler: base,
@@ -174,17 +172,6 @@ func (s *evictLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 	s.handler.ServeHTTP(w, r)
 }
 
-// GetName returns the scheduler name.
-func (*evictLeaderScheduler) GetName() string {
-	return EvictLeaderName
-}
-
-// GetType returns the scheduler type.
-func (*evictLeaderScheduler) GetType() string {
-	return EvictLeaderType
-}
-
-// EncodeConfig serializes the config.
 func (s *evictLeaderScheduler) EncodeConfig() ([]byte, error) {
 	s.conf.mu.RLock()
 	defer s.conf.mu.RUnlock()
@@ -217,7 +204,7 @@ func (s *evictLeaderScheduler) CleanConfig(cluster sche.SchedulerCluster) {
 func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool {
 	allowed := s.OpController.OperatorCount(operator.OpLeader) < cluster.GetSchedulerConfig().GetLeaderScheduleLimit()
 	if !allowed {
-		operator.OperatorLimitCounter.WithLabelValues(s.GetType(), operator.OpLeader.String()).Inc()
+		operator.IncOperatorLimitCounter(s.GetType(), operator.OpLeader)
 	}
 	return allowed
 }
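Since CheckerSchedulerType is an open string-based type, an out-of-tree plugin can mint its own value, exactly as the example plugin does with UserEvictLeaderScheduler. A hypothetical third-party scheduler (the MyScheduler name and "my-scheduler" value are invented for illustration) would follow the same pattern:

// Hypothetical plugin-side sketch; MyScheduler and "my-scheduler" are
// invented names, not part of pd.
package myplugin

import (
	"github.com/tikv/pd/pkg/schedule/operator"
	"github.com/tikv/pd/pkg/schedule/schedulers"
	types "github.com/tikv/pd/pkg/schedule/type"
)

// MyScheduler is the plugin's own typed identifier.
const MyScheduler types.CheckerSchedulerType = "my-scheduler"

// newBase shows the construction pattern: embedding BaseScheduler supplies
// GetName/GetType, so the plugin writes no accessor boilerplate of its own.
func newBase(opController *operator.Controller) *schedulers.BaseScheduler {
	return schedulers.NewBaseScheduler(opController, MyScheduler)
}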
diff --git a/server/config/persist_options.go b/server/config/persist_options.go
index d8a7d69f783..b6963a6645a 100644
--- a/server/config/persist_options.go
+++ b/server/config/persist_options.go
@@ -33,6 +33,7 @@ import (
 	"github.com/tikv/pd/pkg/core/constant"
 	"github.com/tikv/pd/pkg/core/storelimit"
 	sc "github.com/tikv/pd/pkg/schedule/config"
+	types "github.com/tikv/pd/pkg/schedule/type"
 	"github.com/tikv/pd/pkg/slice"
 	"github.com/tikv/pd/pkg/storage/endpoint"
 	"github.com/tikv/pd/pkg/utils/etcdutil"
@@ -669,10 +670,11 @@ func (o *PersistOptions) GetSchedulers() sc.SchedulerConfigs {
 }
 
 // IsSchedulerDisabled returns if the scheduler is disabled.
-func (o *PersistOptions) IsSchedulerDisabled(t string) bool {
+func (o *PersistOptions) IsSchedulerDisabled(tp types.CheckerSchedulerType) bool {
+	oldType := types.SchedulerTypeCompatibleMap[tp]
 	schedulers := o.GetScheduleConfig().Schedulers
 	for _, s := range schedulers {
-		if t == s.Type {
+		if oldType == s.Type {
 			return s.Disable
 		}
 	}
@@ -690,33 +692,35 @@ func (o *PersistOptions) GetHotRegionsReservedDays() uint64 {
 }
 
 // AddSchedulerCfg adds the scheduler configurations.
-func (o *PersistOptions) AddSchedulerCfg(tp string, args []string) {
+func (o *PersistOptions) AddSchedulerCfg(tp types.CheckerSchedulerType, args []string) {
+	oldType := types.SchedulerTypeCompatibleMap[tp]
 	v := o.GetScheduleConfig().Clone()
 	for i, schedulerCfg := range v.Schedulers {
 		// comparing args is to cover the case that there are schedulers in same type but not with same name
 		// such as two schedulers of type "evict-leader",
 		// one name is "evict-leader-scheduler-1" and the other is "evict-leader-scheduler-2"
-		if reflect.DeepEqual(schedulerCfg, sc.SchedulerConfig{Type: tp, Args: args, Disable: false}) {
+		if reflect.DeepEqual(schedulerCfg, sc.SchedulerConfig{Type: oldType, Args: args, Disable: false}) {
 			return
 		}
 
-		if reflect.DeepEqual(schedulerCfg, sc.SchedulerConfig{Type: tp, Args: args, Disable: true}) {
+		if reflect.DeepEqual(schedulerCfg, sc.SchedulerConfig{Type: oldType, Args: args, Disable: true}) {
 			schedulerCfg.Disable = false
 			v.Schedulers[i] = schedulerCfg
 			o.SetScheduleConfig(v)
 			return
 		}
 	}
-	v.Schedulers = append(v.Schedulers, sc.SchedulerConfig{Type: tp, Args: args, Disable: false})
+	v.Schedulers = append(v.Schedulers, sc.SchedulerConfig{Type: oldType, Args: args, Disable: false})
 	o.SetScheduleConfig(v)
 }
 
 // RemoveSchedulerCfg removes the scheduler configurations.
-func (o *PersistOptions) RemoveSchedulerCfg(tp string) {
+func (o *PersistOptions) RemoveSchedulerCfg(tp types.CheckerSchedulerType) {
+	oldType := types.SchedulerTypeCompatibleMap[tp]
 	v := o.GetScheduleConfig().Clone()
 	for i, schedulerCfg := range v.Schedulers {
-		if tp == schedulerCfg.Type {
-			if sc.IsDefaultScheduler(tp) {
+		if oldType == schedulerCfg.Type {
+			if sc.IsDefaultScheduler(oldType) {
 				schedulerCfg.Disable = true
 				v.Schedulers[i] = schedulerCfg
 			} else {
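The translation happens once at the top of each method: the typed parameter becomes the legacy string, and everything below still compares and stores old-style type values. The following self-contained sketch (stand-in types, no pd imports) reproduces the three outcomes of AddSchedulerCfg: already present, present but disabled, and absent.

// Self-contained sketch of the AddSchedulerCfg logic above, with local
// stand-in types so it runs without the pd tree.
package main

import (
	"fmt"
	"reflect"
)

type SchedulerConfig struct {
	Type    string
	Args    []string
	Disable bool
}

func addSchedulerCfg(schedulers []SchedulerConfig, oldType string, args []string) []SchedulerConfig {
	for i, cfg := range schedulers {
		// Args are compared too: two evict-leader schedulers with different
		// targets must be treated as distinct entries.
		if reflect.DeepEqual(cfg, SchedulerConfig{Type: oldType, Args: args, Disable: false}) {
			return schedulers // already registered and enabled: no-op
		}
		if reflect.DeepEqual(cfg, SchedulerConfig{Type: oldType, Args: args, Disable: true}) {
			schedulers[i].Disable = false // re-enable a disabled entry
			return schedulers
		}
	}
	// Absent: append a new enabled entry.
	return append(schedulers, SchedulerConfig{Type: oldType, Args: args, Disable: false})
}

func main() {
	cfgs := []SchedulerConfig{{Type: "balance-leader", Disable: true}}
	cfgs = addSchedulerCfg(cfgs, "balance-leader", nil)
	fmt.Println(cfgs[0].Disable) // false: the disabled entry was re-enabled
}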
diff --git a/server/handler.go b/server/handler.go
index cc924cf9a0b..d36dd6656ae 100644
--- a/server/handler.go
+++ b/server/handler.go
@@ -186,6 +186,7 @@ func (h *Handler) GetAllRequestHistoryHotRegion(request *HistoryHotRegionsReques
 
 // AddScheduler adds a scheduler.
 func (h *Handler) AddScheduler(tp types.CheckerSchedulerType, args ...string) error {
+	// TODO: remove this map in subsequent PRs, because we need to use the new type in `CreateScheduler`.
 	name := types.SchedulerTypeCompatibleMap[tp]
 	c, err := h.GetRaftCluster()
 	if err != nil {
@@ -208,19 +209,19 @@ func (h *Handler) AddScheduler(tp types.CheckerSchedulerType, args ...string) er
 		if err = c.AddSchedulerHandler(s, args...); err != nil {
 			log.Error("can not add scheduler handler", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args), errs.ZapError(err))
 			return err
 		}
-		log.Info("add scheduler handler successfully", zap.String("scheduler-name", name), zap.Strings("scheduler-args", args))
+		log.Info("add scheduler handler successfully", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args))
 	} else {
 		if err = c.AddScheduler(s, args...); err != nil {
 			log.Error("can not add scheduler", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args), errs.ZapError(err))
 			return err
 		}
-		log.Info("add scheduler successfully", zap.String("scheduler-name", name), zap.Strings("scheduler-args", args))
+		log.Info("add scheduler successfully", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args))
 	}
 	if err = h.opt.Persist(c.GetStorage()); err != nil {
 		log.Error("can not persist scheduler config", errs.ZapError(err))
 		return err
 	}
-	log.Info("persist scheduler config successfully", zap.String("scheduler-name", name), zap.Strings("scheduler-args", args))
+	log.Info("persist scheduler config successfully", zap.String("scheduler-name", s.GetName()), zap.Strings("scheduler-args", args))
 	return nil
 }
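The logging change here is subtle but deliberate: `name` is the legacy type string looked up in the compat map, not the scheduler's instance name, so the error logs (which already used s.GetName()) and the success logs previously disagreed. Logging s.GetName() everywhere identifies the actual instance, which also distinguishes multiple schedulers of the same type. A small illustration, using only constants shown earlier in this patch:

package main

import (
	"fmt"

	types "github.com/tikv/pd/pkg/schedule/type"
)

func main() {
	tp := types.GrantLeaderScheduler

	// The compat map yields the legacy *type* string...
	fmt.Println(types.SchedulerTypeCompatibleMap[tp]) // grant-leader

	// ...while the typed constant (and, for default schedulers, a created
	// instance's GetName) carries the concrete scheduler name.
	fmt.Println(tp) // grant-leader-scheduler
}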