diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go
index 5e5e254596a7..df0dbd8bbe33 100644
--- a/pkg/schedule/schedulers/hot_region.go
+++ b/pkg/schedule/schedulers/hot_region.go
@@ -208,21 +208,17 @@ func (h *baseHotScheduler) summaryPendingInfluence(storeInfos map[uint64]*statis
 			}
 		}
 	}
+	// for metrics
 	for storeID, info := range storeInfos {
 		storeLabel := strconv.FormatUint(storeID, 10)
 		if infl := info.PendingSum; infl != nil && len(infl.Loads) != 0 {
 			utils.ForeachRegionStats(func(rwTy utils.RWType, dim int, kind utils.RegionStatKind) {
-				setHotPendingInfluenceMetrics(storeLabel, rwTy.String(), utils.DimToString(dim), infl.Loads[kind])
+				HotPendingSum.WithLabelValues(storeLabel, rwTy.String(), utils.DimToString(dim)).Set(infl.Loads[kind])
 			})
 		}
 	}
 }
 
-// setHotPendingInfluenceMetrics sets pending influence in hot scheduler.
-func setHotPendingInfluenceMetrics(storeLabel, rwTy, dim string, load float64) {
-	HotPendingSum.WithLabelValues(storeLabel, rwTy, dim).Set(load)
-}
-
 func (h *baseHotScheduler) randomRWType() utils.RWType {
 	return h.types[h.r.Int()%len(h.types)]
 }
@@ -332,11 +328,12 @@ func (h *hotScheduler) dispatch(typ utils.RWType, cluster sche.SchedulerCluster)
 	h.Lock()
 	defer h.Unlock()
 	h.updateHistoryLoadConfig(h.conf.GetHistorySampleDuration(), h.conf.GetHistorySampleInterval())
-	h.prepareForBalance(typ, cluster)
-	// it can not move earlier to support to use api and metrics.
+	// no need to prepare if type is forbidden
 	if h.conf.IsForbidRWType(typ) {
 		return nil
 	}
+	h.prepareForBalance(typ, cluster)
+	// it can not move earlier to support to use api and metrics.
 	switch typ {
 	case utils.Read:
 		return h.balanceHotReadRegions(cluster)
@@ -387,7 +384,7 @@ func (h *hotScheduler) balanceHotReadRegions(cluster sche.SchedulerCluster) []*o
 		return nil
 	}
 	leaderSolver.cur = leaderSolver.best
-	if leaderSolver.betterThan(peerSolver.best) {
+	if leaderSolver.rank.betterThan(peerSolver.best) {
 		if leaderSolver.tryAddPendingInfluence() {
 			return leaderOps
 		}
@@ -489,10 +486,16 @@ func (s *solution) getPeersRateFromCache(dim int) float64 {
 	return s.cachedPeersRate[dim]
 }
 
-// isAvailable returns the solution is available.
-// The solution should have no revertRegion and progressiveRank < 0.
-func isAvailableV1(s *solution) bool { - return s.progressiveRank < 0 +type rank interface { + isAvailable(*solution) bool + filterUniformStore() (string, bool) + needSearchRevertRegions() bool + setSearchRevertRegions() + calcProgressiveRank() + betterThan(*solution) bool + rankToDimString() string + checkByPriorityAndTolerance(loads []float64, f func(int) bool) bool + checkHistoryLoadsByPriority(loads [][]float64, f func(int) bool) bool } type balanceSolver struct { @@ -524,19 +527,8 @@ type balanceSolver struct { maxPeerNum int minHotDegree int - firstPriorityV2Ratios *rankV2Ratios - secondPriorityV2Ratios *rankV2Ratios - // The rank correlation function used according to the version - isAvailable func(*solution) bool - filterUniformStore func() (string, bool) - needSearchRevertRegions func() bool - setSearchRevertRegions func() - calcProgressiveRank func() - betterThan func(*solution) bool - rankToDimString func() string - checkByPriorityAndTolerance func(loads []float64, f func(int) bool) bool - checkHistoryLoadsByPriority func(loads [][]float64, f func(int) bool) bool + rank rank } func (bs *balanceSolver) init() { @@ -548,9 +540,9 @@ func (bs *balanceSolver) init() { bs.greatDecRatio, bs.minorDecRatio = bs.sche.conf.GetGreatDecRatio(), bs.sche.conf.GetMinorDecRatio() switch bs.sche.conf.GetRankFormulaVersion() { case "v1": - bs.initRankV1() + bs.rank = initRankV1(bs) default: - bs.initRankV2() + bs.rank = initRankV2(bs, bs.greatDecRatio) } // Init store load detail according to the type. @@ -590,31 +582,6 @@ func (bs *balanceSolver) init() { } } -func (bs *balanceSolver) initRankV1() { - bs.isAvailable = isAvailableV1 - bs.filterUniformStore = bs.filterUniformStoreV1 - bs.needSearchRevertRegions = func() bool { return false } - bs.setSearchRevertRegions = func() {} - bs.calcProgressiveRank = bs.calcProgressiveRankV1 - bs.betterThan = bs.betterThanV1 - bs.rankToDimString = bs.rankToDimStringV1 - bs.pickCheckPolicyV1() -} - -func (bs *balanceSolver) pickCheckPolicyV1() { - switch { - case bs.resourceTy == writeLeader: - bs.checkByPriorityAndTolerance = bs.checkByPriorityAndToleranceFirstOnly - bs.checkHistoryLoadsByPriority = bs.checkHistoryLoadsByPriorityAndToleranceFirstOnly - case bs.sche.conf.IsStrictPickingStoreEnabled(): - bs.checkByPriorityAndTolerance = bs.checkByPriorityAndToleranceAllOf - bs.checkHistoryLoadsByPriority = bs.checkHistoryLoadsByPriorityAndToleranceAllOf - default: - bs.checkByPriorityAndTolerance = bs.checkByPriorityAndToleranceFirstOnly - bs.checkHistoryLoadsByPriority = bs.checkHistoryLoadsByPriorityAndToleranceFirstOnly - } -} - func (bs *balanceSolver) isSelectedDim(dim int) bool { return dim == bs.firstPriority || dim == bs.secondPriority } @@ -653,27 +620,6 @@ func (bs *balanceSolver) isValid() bool { return true } -func (bs *balanceSolver) filterUniformStoreV1() (string, bool) { - if !bs.enableExpectation() { - return "", false - } - // Because region is available for src and dst, so stddev is the same for both, only need to calculate one. - isUniformFirstPriority, isUniformSecondPriority := bs.isUniformFirstPriority(bs.cur.srcStore), bs.isUniformSecondPriority(bs.cur.srcStore) - if isUniformFirstPriority && isUniformSecondPriority { - // If both dims are enough uniform, any schedule is unnecessary. 
- return "all-dim", true - } - if isUniformFirstPriority && (bs.cur.progressiveRank == -1 || bs.cur.progressiveRank == -3) { - // If first priority dim is enough uniform, -1 is unnecessary and maybe lead to worse balance for second priority dim - return dimToString(bs.firstPriority), true - } - if isUniformSecondPriority && bs.cur.progressiveRank == -2 { - // If second priority dim is enough uniform, -2 is unnecessary and maybe lead to worse balance for first priority dim - return dimToString(bs.secondPriority), true - } - return "", false -} - // solve travels all the src stores, hot peers, dst stores and select each one of them to make a best scheduling solution. // The comparing between solutions is based on calcProgressiveRank. func (bs *balanceSolver) solve() []*operator.Operator { @@ -682,11 +628,11 @@ func (bs *balanceSolver) solve() []*operator.Operator { } bs.cur = &solution{} tryUpdateBestSolution := func() { - if label, ok := bs.filterUniformStore(); ok { + if label, ok := bs.rank.filterUniformStore(); ok { bs.skipCounter(label).Inc() return } - if bs.isAvailable(bs.cur) && bs.betterThan(bs.best) { + if bs.rank.isAvailable(bs.cur) && bs.rank.betterThan(bs.best) { if newOps := bs.buildOperators(); len(newOps) > 0 { bs.ops = newOps clone := *bs.cur @@ -734,9 +680,9 @@ func (bs *balanceSolver) solve() []*operator.Operator { for _, dstStore := range bs.filterDstStores() { bs.cur.dstStore = dstStore - bs.calcProgressiveRank() + bs.rank.calcProgressiveRank() tryUpdateBestSolution() - if bs.needSearchRevertRegions() { + if bs.rank.needSearchRevertRegions() { hotSchedulerSearchRevertRegionsCounter.Inc() dstStoreID := dstStore.GetID() for _, revertPeerStat := range bs.filteredHotPeers[dstStoreID] { @@ -747,7 +693,7 @@ func (bs *balanceSolver) solve() []*operator.Operator { } bs.cur.revertPeerStat = revertPeerStat bs.cur.revertRegion = revertRegion - bs.calcProgressiveRank() + bs.rank.calcProgressiveRank() tryUpdateBestSolution() } bs.cur.revertPeerStat = nil @@ -757,7 +703,7 @@ func (bs *balanceSolver) solve() []*operator.Operator { } } - bs.setSearchRevertRegions() + bs.rank.setSearchRevertRegions() return bs.ops } @@ -902,7 +848,7 @@ func (bs *balanceSolver) filterSrcStores() map[uint64]*statistics.StoreLoadDetai } func (bs *balanceSolver) checkSrcByPriorityAndTolerance(minLoad, expectLoad *statistics.StoreLoad, toleranceRatio float64) bool { - return bs.checkByPriorityAndTolerance(minLoad.Loads, func(i int) bool { + return bs.rank.checkByPriorityAndTolerance(minLoad.Loads, func(i int) bool { return minLoad.Loads[i] > toleranceRatio*expectLoad.Loads[i] }) } @@ -911,7 +857,7 @@ func (bs *balanceSolver) checkSrcHistoryLoadsByPriorityAndTolerance(current, exp if len(current.HistoryLoads) == 0 { return true } - return bs.checkHistoryLoadsByPriority(current.HistoryLoads, func(i int) bool { + return bs.rank.checkHistoryLoadsByPriority(current.HistoryLoads, func(i int) bool { return slice.AllOf(current.HistoryLoads[i], func(j int) bool { return current.HistoryLoads[i][j] > toleranceRatio*expectLoad.HistoryLoads[i][j] }) @@ -1141,7 +1087,7 @@ func (bs *balanceSolver) pickDstStores(filters []filter.Filter, candidates []*st } func (bs *balanceSolver) checkDstByPriorityAndTolerance(maxLoad, expect *statistics.StoreLoad, toleranceRatio float64) bool { - return bs.checkByPriorityAndTolerance(maxLoad.Loads, func(i int) bool { + return bs.rank.checkByPriorityAndTolerance(maxLoad.Loads, func(i int) bool { return maxLoad.Loads[i]*toleranceRatio < expect.Loads[i] }) } @@ -1150,7 +1096,7 @@ func (bs 
*balanceSolver) checkDstHistoryLoadsByPriorityAndTolerance(current, exp if len(current.HistoryLoads) == 0 { return true } - return bs.checkHistoryLoadsByPriority(current.HistoryLoads, func(i int) bool { + return bs.rank.checkHistoryLoadsByPriority(current.HistoryLoads, func(i int) bool { return slice.AllOf(current.HistoryLoads[i], func(j int) bool { return current.HistoryLoads[i][j]*toleranceRatio < expect.HistoryLoads[i][j] }) @@ -1214,53 +1160,6 @@ func (bs *balanceSolver) isUniformSecondPriority(store *statistics.StoreLoadDeta return store.IsUniform(bs.secondPriority, stddevThreshold) } -// calcProgressiveRank calculates `bs.cur.progressiveRank`. -// See the comments of `solution.progressiveRank` for more about progressive rank. -// | ↓ firstPriority \ secondPriority → | isBetter | isNotWorsened | Worsened | -// | isBetter | -4 | -3 | -1 / 0 | -// | isNotWorsened | -2 | 1 | 1 | -// | Worsened | 0 | 1 | 1 | -func (bs *balanceSolver) calcProgressiveRankV1() { - bs.cur.progressiveRank = 1 - bs.cur.calcPeersRate(bs.firstPriority, bs.secondPriority) - if bs.cur.getPeersRateFromCache(bs.firstPriority) < bs.getMinRate(bs.firstPriority) && - bs.cur.getPeersRateFromCache(bs.secondPriority) < bs.getMinRate(bs.secondPriority) { - return - } - - if bs.resourceTy == writeLeader { - // For write leader, only compare the first priority. - // If the first priority is better, the progressiveRank is -3. - // Because it is not a solution that needs to be optimized. - if bs.isBetterForWriteLeader() { - bs.cur.progressiveRank = -3 - } - return - } - - isFirstBetter, isSecondBetter := bs.isBetter(bs.firstPriority), bs.isBetter(bs.secondPriority) - isFirstNotWorsened := isFirstBetter || bs.isNotWorsened(bs.firstPriority) - isSecondNotWorsened := isSecondBetter || bs.isNotWorsened(bs.secondPriority) - switch { - case isFirstBetter && isSecondBetter: - // If belonging to the case, all two dim will be more balanced, the best choice. - bs.cur.progressiveRank = -4 - case isFirstBetter && isSecondNotWorsened: - // If belonging to the case, the first priority dim will be more balanced, the second priority dim will be not worsened. - bs.cur.progressiveRank = -3 - case isFirstNotWorsened && isSecondBetter: - // If belonging to the case, the first priority dim will be not worsened, the second priority dim will be more balanced. - bs.cur.progressiveRank = -2 - case isFirstBetter: - // If belonging to the case, the first priority dim will be more balanced, ignore the second priority dim. - bs.cur.progressiveRank = -1 - case isSecondBetter: - // If belonging to the case, the second priority dim will be more balanced, ignore the first priority dim. - // It's a solution that cannot be used directly, but can be optimized. - bs.cur.progressiveRank = 0 - } -} - // isTolerance checks source store and target store by checking the difference value with pendingAmpFactor * pendingPeer. // This will make the hot region scheduling slow even serialize running when each 2 store's pending influence is close. func (bs *balanceSolver) isTolerance(dim int, reverse bool) bool { @@ -1282,39 +1181,6 @@ func (bs *balanceSolver) isTolerance(dim int, reverse bool) bool { return srcRate-pendingAmp*srcPending > dstRate+pendingAmp*dstPending } -func (bs *balanceSolver) getHotDecRatioByPriorities(dim int) (isHot bool, decRatio float64) { - // we use DecRatio(Decline Ratio) to expect that the dst store's rate should still be less - // than the src store's rate after scheduling one peer. 
- srcRate, dstRate := bs.cur.getExtremeLoad(dim) - peersRate := bs.cur.getPeersRateFromCache(dim) - // Rate may be negative after adding revertRegion, which should be regarded as moving from dst to src. - if peersRate >= 0 { - isHot = peersRate >= bs.getMinRate(dim) - decRatio = (dstRate + peersRate) / math.Max(srcRate-peersRate, 1) - } else { - isHot = -peersRate >= bs.getMinRate(dim) - decRatio = (srcRate - peersRate) / math.Max(dstRate+peersRate, 1) - } - return -} - -func (bs *balanceSolver) isBetterForWriteLeader() bool { - srcRate, dstRate := bs.cur.getExtremeLoad(bs.firstPriority) - peersRate := bs.cur.getPeersRateFromCache(bs.firstPriority) - return srcRate-peersRate >= dstRate+peersRate && bs.isTolerance(bs.firstPriority, false) -} - -func (bs *balanceSolver) isBetter(dim int) bool { - isHot, decRatio := bs.getHotDecRatioByPriorities(dim) - return isHot && decRatio <= bs.greatDecRatio && bs.isTolerance(dim, false) -} - -// isNotWorsened must be true if isBetter is true. -func (bs *balanceSolver) isNotWorsened(dim int) bool { - isHot, decRatio := bs.getHotDecRatioByPriorities(dim) - return !isHot || decRatio <= bs.minorDecRatio -} - func (bs *balanceSolver) getMinRate(dim int) float64 { switch dim { case utils.KeyDim: @@ -1327,79 +1193,12 @@ func (bs *balanceSolver) getMinRate(dim int) float64 { return -1 } -// betterThan checks if `bs.cur` is a better solution than `old`. -func (bs *balanceSolver) betterThanV1(old *solution) bool { - if old == nil || bs.cur.progressiveRank <= splitProgressiveRank { - return true - } - if bs.cur.progressiveRank != old.progressiveRank { - // Smaller rank is better. - return bs.cur.progressiveRank < old.progressiveRank - } - if (bs.cur.revertRegion == nil) != (old.revertRegion == nil) { - // Fewer revertRegions are better. - return bs.cur.revertRegion == nil - } - - if r := bs.compareSrcStore(bs.cur.srcStore, old.srcStore); r < 0 { - return true - } else if r > 0 { - return false - } - - if r := bs.compareDstStore(bs.cur.dstStore, old.dstStore); r < 0 { - return true - } else if r > 0 { - return false - } - - if bs.cur.mainPeerStat != old.mainPeerStat { - // compare region - if bs.resourceTy == writeLeader { - return bs.cur.getPeersRateFromCache(bs.firstPriority) > old.getPeersRateFromCache(bs.firstPriority) - } - - // We will firstly consider ensuring converge faster, secondly reduce oscillation - firstCmp, secondCmp := bs.getRkCmpPrioritiesV1(old) - switch bs.cur.progressiveRank { - case -4: // isBetter(firstPriority) && isBetter(secondPriority) - if firstCmp != 0 { - return firstCmp > 0 - } - return secondCmp > 0 - case -3: // isBetter(firstPriority) && isNotWorsened(secondPriority) - if firstCmp != 0 { - return firstCmp > 0 - } - // prefer smaller second priority rate, to reduce oscillation - return secondCmp < 0 - case -2: // isNotWorsened(firstPriority) && isBetter(secondPriority) - if secondCmp != 0 { - return secondCmp > 0 - } - // prefer smaller first priority rate, to reduce oscillation - return firstCmp < 0 - case -1: // isBetter(firstPriority) - return firstCmp > 0 - // TODO: The smaller the difference between the value and the expectation, the better. 
- } - } - - return false -} - var dimToStep = [utils.DimLen]float64{ utils.ByteDim: 100, utils.KeyDim: 10, utils.QueryDim: 10, } -func (bs *balanceSolver) getRkCmpPrioritiesV1(old *solution) (firstCmp int, secondCmp int) { - firstCmp = rankCmp(bs.cur.getPeersRateFromCache(bs.firstPriority), old.getPeersRateFromCache(bs.firstPriority), stepRank(0, dimToStep[bs.firstPriority])) - secondCmp = rankCmp(bs.cur.getPeersRateFromCache(bs.secondPriority), old.getPeersRateFromCache(bs.secondPriority), stepRank(0, dimToStep[bs.secondPriority])) - return -} - // compareSrcStore compares the source store of detail1, detail2, the result is: // 1. if detail1 is better than detail2, return -1 // 2. if detail1 is worse than detail2, return 1 @@ -1504,21 +1303,6 @@ func (bs *balanceSolver) isReadyToBuild() bool { bs.cur.revertRegion != nil && bs.cur.revertRegion.GetID() == bs.cur.revertPeerStat.ID() } -func (bs *balanceSolver) rankToDimStringV1() string { - switch bs.cur.progressiveRank { - case -4: - return "all" - case -3: - return dimToString(bs.firstPriority) - case -2: - return dimToString(bs.secondPriority) - case -1: - return dimToString(bs.firstPriority) + "-only" - default: - return "none" - } -} - func (bs *balanceSolver) buildOperators() (ops []*operator.Operator) { if !bs.isReadyToBuild() { return nil @@ -1544,7 +1328,7 @@ func (bs *balanceSolver) buildOperators() (ops []*operator.Operator) { dstStoreID := bs.cur.dstStore.GetID() sourceLabel := strconv.FormatUint(srcStoreID, 10) targetLabel := strconv.FormatUint(dstStoreID, 10) - dim := bs.rankToDimString() + dim := bs.rank.rankToDimString() currentOp, typ, err := bs.createOperator(bs.cur.region, srcStoreID, dstStoreID) if err == nil { @@ -1919,38 +1703,13 @@ func buildResourceType(rwTy utils.RWType, ty constant.ResourceKind) resourceType panic(fmt.Sprintf("invalid arguments for buildResourceType: rwTy = %v, ty = %v", rwTy, ty)) } -func stringToDim(name string) int { - switch name { - case utils.BytePriority: - return utils.ByteDim - case utils.KeyPriority: - return utils.KeyDim - case utils.QueryPriority: - return utils.QueryDim - } - return utils.ByteDim -} - -func dimToString(dim int) string { - switch dim { - case utils.ByteDim: - return utils.BytePriority - case utils.KeyDim: - return utils.KeyPriority - case utils.QueryDim: - return utils.QueryPriority - default: - return "" - } -} - func prioritiesToDim(priorities []string) (firstPriority int, secondPriority int) { - return stringToDim(priorities[0]), stringToDim(priorities[1]) + return utils.StringToDim(priorities[0]), utils.StringToDim(priorities[1]) } // tooHotNeedSplit returns true if any dim of the hot region is greater than the store threshold. func (bs *balanceSolver) tooHotNeedSplit(store *statistics.StoreLoadDetail, region *statistics.HotPeerStat, splitThresholds float64) bool { - return bs.checkByPriorityAndTolerance(store.LoadPred.Current.Loads, func(i int) bool { + return bs.rank.checkByPriorityAndTolerance(store.LoadPred.Current.Loads, func(i int) bool { return region.Loads[i] > store.LoadPred.Current.Loads[i]*splitThresholds }) } diff --git a/pkg/schedule/schedulers/hot_region_rank_v1.go b/pkg/schedule/schedulers/hot_region_rank_v1.go new file mode 100644 index 000000000000..a91e8be169a7 --- /dev/null +++ b/pkg/schedule/schedulers/hot_region_rank_v1.go @@ -0,0 +1,243 @@ +// Copyright 2017 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schedulers + +import ( + "math" + + "github.com/tikv/pd/pkg/statistics/utils" +) + +type rankV1 struct { + *balanceSolver +} + +func initRankV1(r *balanceSolver) *rankV1 { + return &rankV1{balanceSolver: r} +} + +// isAvailable returns the solution is available. +// The solution should have no revertRegion and progressiveRank < 0. +func (*rankV1) isAvailable(s *solution) bool { + return s.progressiveRank < 0 +} + +func (r *rankV1) checkByPriorityAndTolerance(loads []float64, f func(int) bool) bool { + switch { + case r.resourceTy == writeLeader: + return r.checkByPriorityAndToleranceFirstOnly(loads, f) + case r.sche.conf.IsStrictPickingStoreEnabled(): + return r.checkByPriorityAndToleranceAllOf(loads, f) + default: + return r.checkByPriorityAndToleranceFirstOnly(loads, f) + } +} + +func (r *rankV1) checkHistoryLoadsByPriority(loads [][]float64, f func(int) bool) bool { + switch { + case r.resourceTy == writeLeader: + return r.checkHistoryLoadsByPriorityAndToleranceFirstOnly(loads, f) + case r.sche.conf.IsStrictPickingStoreEnabled(): + return r.checkHistoryLoadsByPriorityAndToleranceAllOf(loads, f) + default: + return r.checkHistoryLoadsByPriorityAndToleranceFirstOnly(loads, f) + } +} + +func (r *rankV1) filterUniformStore() (string, bool) { + if !r.enableExpectation() { + return "", false + } + // Because region is available for src and dst, so stddev is the same for both, only need to calculate one. + isUniformFirstPriority, isUniformSecondPriority := r.isUniformFirstPriority(r.cur.srcStore), r.isUniformSecondPriority(r.cur.srcStore) + if isUniformFirstPriority && isUniformSecondPriority { + // If both dims are enough uniform, any schedule is unnecessary. + return "all-dim", true + } + if isUniformFirstPriority && (r.cur.progressiveRank == -1 || r.cur.progressiveRank == -3) { + // If first priority dim is enough uniform, -1 is unnecessary and maybe lead to worse balance for second priority dim + return utils.DimToString(r.firstPriority), true + } + if isUniformSecondPriority && r.cur.progressiveRank == -2 { + // If second priority dim is enough uniform, -2 is unnecessary and maybe lead to worse balance for first priority dim + return utils.DimToString(r.secondPriority), true + } + return "", false +} + +// calcProgressiveRank calculates `r.cur.progressiveRank`. +// See the comments of `solution.progressiveRank` for more about progressive rank. +// | ↓ firstPriority \ secondPriority → | isBetter | isNotWorsened | Worsened | +// | isBetter | -4 | -3 | -1 / 0 | +// | isNotWorsened | -2 | 1 | 1 | +// | Worsened | 0 | 1 | 1 | +func (r *rankV1) calcProgressiveRank() { + r.cur.progressiveRank = 1 + r.cur.calcPeersRate(r.firstPriority, r.secondPriority) + if r.cur.getPeersRateFromCache(r.firstPriority) < r.getMinRate(r.firstPriority) && + r.cur.getPeersRateFromCache(r.secondPriority) < r.getMinRate(r.secondPriority) { + return + } + + if r.resourceTy == writeLeader { + // For write leader, only compare the first priority. + // If the first priority is better, the progressiveRank is -3. + // Because it is not a solution that needs to be optimized. 
+ if r.isBetterForWriteLeader() { + r.cur.progressiveRank = -3 + } + return + } + + isFirstBetter, isSecondBetter := r.isBetter(r.firstPriority), r.isBetter(r.secondPriority) + isFirstNotWorsened := isFirstBetter || r.isNotWorsened(r.firstPriority) + isSecondNotWorsened := isSecondBetter || r.isNotWorsened(r.secondPriority) + switch { + case isFirstBetter && isSecondBetter: + // If belonging to the case, all two dim will be more balanced, the best choice. + r.cur.progressiveRank = -4 + case isFirstBetter && isSecondNotWorsened: + // If belonging to the case, the first priority dim will be more balanced, the second priority dim will be not worsened. + r.cur.progressiveRank = -3 + case isFirstNotWorsened && isSecondBetter: + // If belonging to the case, the first priority dim will be not worsened, the second priority dim will be more balanced. + r.cur.progressiveRank = -2 + case isFirstBetter: + // If belonging to the case, the first priority dim will be more balanced, ignore the second priority dim. + r.cur.progressiveRank = -1 + case isSecondBetter: + // If belonging to the case, the second priority dim will be more balanced, ignore the first priority dim. + // It's a solution that cannot be used directly, but can be optimized. + r.cur.progressiveRank = 0 + } +} + +// betterThan checks if `r.cur` is a better solution than `old`. +func (r *rankV1) betterThan(old *solution) bool { + if old == nil || r.cur.progressiveRank <= splitProgressiveRank { + return true + } + if r.cur.progressiveRank != old.progressiveRank { + // Smaller rank is better. + return r.cur.progressiveRank < old.progressiveRank + } + if (r.cur.revertRegion == nil) != (old.revertRegion == nil) { + // Fewer revertRegions are better. + return r.cur.revertRegion == nil + } + + if r := r.compareSrcStore(r.cur.srcStore, old.srcStore); r < 0 { + return true + } else if r > 0 { + return false + } + + if r := r.compareDstStore(r.cur.dstStore, old.dstStore); r < 0 { + return true + } else if r > 0 { + return false + } + + if r.cur.mainPeerStat != old.mainPeerStat { + // compare region + if r.resourceTy == writeLeader { + return r.cur.getPeersRateFromCache(r.firstPriority) > old.getPeersRateFromCache(r.firstPriority) + } + + // We will firstly consider ensuring converge faster, secondly reduce oscillation + firstCmp, secondCmp := r.getRkCmpPriorities(old) + switch r.cur.progressiveRank { + case -4: // isBetter(firstPriority) && isBetter(secondPriority) + if firstCmp != 0 { + return firstCmp > 0 + } + return secondCmp > 0 + case -3: // isBetter(firstPriority) && isNotWorsened(secondPriority) + if firstCmp != 0 { + return firstCmp > 0 + } + // prefer smaller second priority rate, to reduce oscillation + return secondCmp < 0 + case -2: // isNotWorsened(firstPriority) && isBetter(secondPriority) + if secondCmp != 0 { + return secondCmp > 0 + } + // prefer smaller first priority rate, to reduce oscillation + return firstCmp < 0 + case -1: // isBetter(firstPriority) + return firstCmp > 0 + // TODO: The smaller the difference between the value and the expectation, the better. 
+ } + } + + return false +} + +func (r *rankV1) getRkCmpPriorities(old *solution) (firstCmp int, secondCmp int) { + firstCmp = rankCmp(r.cur.getPeersRateFromCache(r.firstPriority), old.getPeersRateFromCache(r.firstPriority), stepRank(0, dimToStep[r.firstPriority])) + secondCmp = rankCmp(r.cur.getPeersRateFromCache(r.secondPriority), old.getPeersRateFromCache(r.secondPriority), stepRank(0, dimToStep[r.secondPriority])) + return +} + +func (r *rankV1) rankToDimString() string { + switch r.cur.progressiveRank { + case -4: + return "all" + case -3: + return utils.DimToString(r.firstPriority) + case -2: + return utils.DimToString(r.secondPriority) + case -1: + return utils.DimToString(r.firstPriority) + "-only" + default: + return "none" + } +} + +func (*rankV1) needSearchRevertRegions() bool { return false } +func (*rankV1) setSearchRevertRegions() {} + +func (r *rankV1) isBetterForWriteLeader() bool { + srcRate, dstRate := r.cur.getExtremeLoad(r.firstPriority) + peersRate := r.cur.getPeersRateFromCache(r.firstPriority) + return srcRate-peersRate >= dstRate+peersRate && r.isTolerance(r.firstPriority, false) +} + +func (r *rankV1) isBetter(dim int) bool { + isHot, decRatio := r.getHotDecRatioByPriorities(dim) + return isHot && decRatio <= r.greatDecRatio && r.isTolerance(dim, false) +} + +// isNotWorsened must be true if isBetter is true. +func (r *rankV1) isNotWorsened(dim int) bool { + isHot, decRatio := r.getHotDecRatioByPriorities(dim) + return !isHot || decRatio <= r.minorDecRatio +} + +func (r *rankV1) getHotDecRatioByPriorities(dim int) (isHot bool, decRatio float64) { + // we use DecRatio(Decline Ratio) to expect that the dst store's rate should still be less + // than the src store's rate after scheduling one peer. + srcRate, dstRate := r.cur.getExtremeLoad(dim) + peersRate := r.cur.getPeersRateFromCache(dim) + // Rate may be negative after adding revertRegion, which should be regarded as moving from dst to src. + if peersRate >= 0 { + isHot = peersRate >= r.getMinRate(dim) + decRatio = (dstRate + peersRate) / math.Max(srcRate-peersRate, 1) + } else { + isHot = -peersRate >= r.getMinRate(dim) + decRatio = (srcRate - peersRate) / math.Max(dstRate+peersRate, 1) + } + return +} diff --git a/pkg/schedule/schedulers/hot_region_v2.go b/pkg/schedule/schedulers/hot_region_rank_v2.go similarity index 77% rename from pkg/schedule/schedulers/hot_region_v2.go rename to pkg/schedule/schedulers/hot_region_rank_v2.go index 50016231cada..864364825793 100644 --- a/pkg/schedule/schedulers/hot_region_v2.go +++ b/pkg/schedule/schedulers/hot_region_rank_v2.go @@ -31,14 +31,6 @@ const ( secondPriorityMinHotRatio = 0.03 // PeerRate needs to be greater than 3% lowRate ) -// isAvailable returns the solution is available. -// If the solution has no revertRegion, progressiveRank should < 0. -// If the solution has some revertRegion, progressiveRank should equal to -4 or -3. -func isAvailableV2(s *solution) bool { - // TODO: Test if revert region can be enabled for -1. - return s.progressiveRank <= -3 || (s.progressiveRank < 0 && s.revertRegion == nil) -} - type balanceChecker struct { // We use the following example to illustrate the calculation process. // Suppose preBalancedRatio is 0.9, balancedRatio is 0.95. @@ -59,7 +51,7 @@ type balanceChecker struct { // If the ratio is between the two, it is considered to be in the pre-balanced state. // TODO: Unified with stddevThreshold. 
-type rankV2Ratios struct { +type rankRatios struct { // futureChecker is used to calculate the balanced state after the operator is completed. // It is stricter than the currentChecker. futureChecker *balanceChecker @@ -75,7 +67,7 @@ type rankV2Ratios struct { minHotRatio float64 } -func newRankV2Ratios(balancedRatio, perceivedRatio, minHotRatio float64) *rankV2Ratios { +func newRankRatios(balancedRatio, perceivedRatio, minHotRatio float64) *rankRatios { // limit 0.7 <= balancedRatio <= 0.95 if balancedRatio < 0.7 { balancedRatio = 0.7 @@ -95,7 +87,7 @@ func newRankV2Ratios(balancedRatio, perceivedRatio, minHotRatio float64) *rankV2 preBalancedRatio: futureStateChecker.preBalancedRatio - 0.03, } - rs := &rankV2Ratios{ + rs := &rankRatios{ futureChecker: futureStateChecker, currentChecker: currentStateChecker, perceivedRatio: perceivedRatio, minHotRatio: minHotRatio} @@ -103,54 +95,70 @@ func newRankV2Ratios(balancedRatio, perceivedRatio, minHotRatio float64) *rankV2 return rs } -func (bs *balanceSolver) initRankV2() { - bs.firstPriorityV2Ratios = newRankV2Ratios(bs.sche.conf.GetGreatDecRatio(), firstPriorityPerceivedRatio, firstPriorityMinHotRatio) - // The second priority is less demanding. Set the preBalancedRatio of the first priority to the balancedRatio of the second dimension. - bs.secondPriorityV2Ratios = newRankV2Ratios(bs.firstPriorityV2Ratios.futureChecker.preBalancedRatio, secondPriorityPerceivedRatio, secondPriorityMinHotRatio) - - bs.isAvailable = isAvailableV2 - bs.filterUniformStore = bs.filterUniformStoreV2 - bs.needSearchRevertRegions = bs.needSearchRevertRegionsV2 - bs.setSearchRevertRegions = bs.setSearchRevertRegionsV2 - bs.calcProgressiveRank = bs.calcProgressiveRankV2 - bs.betterThan = bs.betterThanV2 - bs.rankToDimString = bs.rankToDimStringV2 - bs.pickCheckPolicyV2() +type rankV2 struct { + *balanceSolver + + firstPriorityRatios *rankRatios + secondPriorityRatios *rankRatios +} + +func initRankV2(bs *balanceSolver, balanceRatio float64) *rankV2 { + firstPriorityRatios := newRankRatios(balanceRatio, firstPriorityPerceivedRatio, firstPriorityMinHotRatio) + return &rankV2{ + balanceSolver: bs, + firstPriorityRatios: firstPriorityRatios, + secondPriorityRatios: newRankRatios(firstPriorityRatios.futureChecker.preBalancedRatio, secondPriorityPerceivedRatio, secondPriorityMinHotRatio), + } +} + +// isAvailable returns the solution is available. +// If the solution has no revertRegion, progressiveRank should < 0. +// If the solution has some revertRegion, progressiveRank should equal to -4 or -3. +func (*rankV2) isAvailable(s *solution) bool { + // TODO: Test if revert region can be enabled for -1. + return s.progressiveRank <= -3 || (s.progressiveRank < 0 && s.revertRegion == nil) +} + +func (r *rankV2) checkByPriorityAndTolerance(loads []float64, f func(int) bool) bool { + switch { + case r.resourceTy == writeLeader: + return r.checkByPriorityAndToleranceFirstOnly(loads, f) + default: + return r.checkByPriorityAndToleranceAnyOf(loads, f) + } } // pickCheckPolicyV2 will set checkByPriorityAndTolerance to the corresponding function. // Note: PolicyV2 will search more possible solutions than PolicyV1. // so it allows to schedule when any of the two dimensions is not balanced. 
-func (bs *balanceSolver) pickCheckPolicyV2() { +func (r *rankV2) checkHistoryLoadsByPriority(loads [][]float64, f func(int) bool) bool { switch { - case bs.resourceTy == writeLeader: - bs.checkByPriorityAndTolerance = bs.checkByPriorityAndToleranceFirstOnly - bs.checkHistoryLoadsByPriority = bs.checkHistoryLoadsByPriorityAndToleranceFirstOnly + case r.resourceTy == writeLeader: + return r.checkHistoryLoadsByPriorityAndToleranceFirstOnly(loads, f) default: - bs.checkByPriorityAndTolerance = bs.checkByPriorityAndToleranceAnyOf - bs.checkHistoryLoadsByPriority = bs.checkHistoryByPriorityAndToleranceAnyOf + return r.checkHistoryByPriorityAndToleranceAnyOf(loads, f) } } // filterUniformStoreV2 filters stores by stddev. // stddev is the standard deviation of the store's load for all stores. -func (bs *balanceSolver) filterUniformStoreV2() (string, bool) { - if !bs.enableExpectation() { +func (r *rankV2) filterUniformStore() (string, bool) { + if !r.enableExpectation() { return "", false } // Because region is available for src and dst, so stddev is the same for both, only need to calculate one. - isUniformFirstPriority, isUniformSecondPriority := bs.isUniformFirstPriority(bs.cur.srcStore), bs.isUniformSecondPriority(bs.cur.srcStore) + isUniformFirstPriority, isUniformSecondPriority := r.isUniformFirstPriority(r.cur.srcStore), r.isUniformSecondPriority(r.cur.srcStore) if isUniformFirstPriority && isUniformSecondPriority { // If both dims are enough uniform, any schedule is unnecessary. return "all-dim", true } - if isUniformFirstPriority && (bs.cur.progressiveRank == -2 || bs.cur.progressiveRank == -3) { + if isUniformFirstPriority && (r.cur.progressiveRank == -2 || r.cur.progressiveRank == -3) { // If first priority dim is enough uniform, -2 is unnecessary and maybe lead to worse balance for second priority dim - return utils.DimToString(bs.firstPriority), true + return utils.DimToString(r.firstPriority), true } - if isUniformSecondPriority && bs.cur.progressiveRank == -1 { + if isUniformSecondPriority && r.cur.progressiveRank == -1 { // If second priority dim is enough uniform, -1 is unnecessary and maybe lead to worse balance for first priority dim - return utils.DimToString(bs.secondPriority), true + return utils.DimToString(r.secondPriority), true } return "", false } @@ -161,28 +169,28 @@ func (bs *balanceSolver) filterUniformStoreV2() (string, bool) { // * The current best solution is not good enough. // - The current best solution has progressiveRank < -2 , but contain revert regions. // - The current best solution has progressiveRank >= -2. -func (bs *balanceSolver) needSearchRevertRegionsV2() bool { - if !bs.sche.searchRevertRegions[bs.resourceTy] { +func (r *rankV2) needSearchRevertRegions() bool { + if !r.sche.searchRevertRegions[r.resourceTy] { return false } - return (bs.cur.progressiveRank == -2 || bs.cur.progressiveRank == 0) && - (bs.best == nil || bs.best.progressiveRank >= -2 || bs.best.revertRegion != nil) + return (r.cur.progressiveRank == -2 || r.cur.progressiveRank == 0) && + (r.best == nil || r.best.progressiveRank >= -2 || r.best.revertRegion != nil) } -func (bs *balanceSolver) setSearchRevertRegionsV2() { +func (r *rankV2) setSearchRevertRegions() { // The next solve is allowed to search-revert-regions only when the following conditions are met. // * No best solution was found this time. // * The progressiveRank of the best solution == -2. (first is better, second is worsened) // * The best solution contain revert regions. 
- searchRevertRegions := bs.best == nil || bs.best.progressiveRank == -2 || bs.best.revertRegion != nil - bs.sche.searchRevertRegions[bs.resourceTy] = searchRevertRegions + searchRevertRegions := r.best == nil || r.best.progressiveRank == -2 || r.best.revertRegion != nil + r.sche.searchRevertRegions[r.resourceTy] = searchRevertRegions if searchRevertRegions { - event := fmt.Sprintf("%s-%s-allow-search-revert-regions", bs.rwTy.String(), bs.opTy.String()) - schedulerCounter.WithLabelValues(bs.sche.GetName(), event).Inc() + event := fmt.Sprintf("%s-%s-allow-search-revert-regions", r.rwTy.String(), r.opTy.String()) + schedulerCounter.WithLabelValues(r.sche.GetName(), event).Inc() } } -// calcProgressiveRankV2 calculates `bs.cur.progressiveRank`. +// calcProgressiveRankV2 calculates `r.cur.progressiveRank`. // See the comments of `solution.progressiveRank` for more about progressive rank. // isBetter: score > 0 // isNotWorsened: score == 0 @@ -191,48 +199,48 @@ func (bs *balanceSolver) setSearchRevertRegionsV2() { // | isBetter | -4 | -3 | -2 | // | isNotWorsened | -1 | 1 | 1 | // | isWorsened | 0 | 1 | 1 | -func (bs *balanceSolver) calcProgressiveRankV2() { - bs.cur.progressiveRank = 1 - bs.cur.calcPeersRate(bs.firstPriority, bs.secondPriority) - if bs.cur.getPeersRateFromCache(bs.firstPriority) < bs.getMinRate(bs.firstPriority) && - bs.cur.getPeersRateFromCache(bs.secondPriority) < bs.getMinRate(bs.secondPriority) { +func (r *rankV2) calcProgressiveRank() { + r.cur.progressiveRank = 1 + r.cur.calcPeersRate(r.firstPriority, r.secondPriority) + if r.cur.getPeersRateFromCache(r.firstPriority) < r.getMinRate(r.firstPriority) && + r.cur.getPeersRateFromCache(r.secondPriority) < r.getMinRate(r.secondPriority) { return } - if bs.resourceTy == writeLeader { + if r.resourceTy == writeLeader { // For write leader, only compare the first priority. // If the first priority is better, the progressiveRank is -3. // Because it is not a solution that needs to be optimized. - if bs.getScoreByPriorities(bs.firstPriority, bs.firstPriorityV2Ratios) > 0 { - bs.cur.progressiveRank = -3 + if r.getScoreByPriorities(r.firstPriority, r.firstPriorityRatios) > 0 { + r.cur.progressiveRank = -3 } return } - firstScore := bs.getScoreByPriorities(bs.firstPriority, bs.firstPriorityV2Ratios) - secondScore := bs.getScoreByPriorities(bs.secondPriority, bs.secondPriorityV2Ratios) - bs.cur.firstScore, bs.cur.secondScore = firstScore, secondScore + firstScore := r.getScoreByPriorities(r.firstPriority, r.firstPriorityRatios) + secondScore := r.getScoreByPriorities(r.secondPriority, r.secondPriorityRatios) + r.cur.firstScore, r.cur.secondScore = firstScore, secondScore switch { case firstScore > 0 && secondScore > 0: // If belonging to the case, all two dim will be more balanced, the best choice. - bs.cur.progressiveRank = -4 + r.cur.progressiveRank = -4 case firstScore > 0 && secondScore == 0: // If belonging to the case, the first priority dim will be more balanced, the second priority dim will be not worsened. - bs.cur.progressiveRank = -3 + r.cur.progressiveRank = -3 case firstScore > 0: // If belonging to the case, the first priority dim will be more balanced, ignore the second priority dim. - bs.cur.progressiveRank = -2 + r.cur.progressiveRank = -2 case firstScore == 0 && secondScore > 0: // If belonging to the case, the first priority dim will be not worsened, the second priority dim will be more balanced. 
- bs.cur.progressiveRank = -1 + r.cur.progressiveRank = -1 case secondScore > 0: // If belonging to the case, the second priority dim will be more balanced, ignore the first priority dim. // It's a solution that cannot be used directly, but can be optimized. - bs.cur.progressiveRank = 0 + r.cur.progressiveRank = 0 } } -func (bs *balanceSolver) getScoreByPriorities(dim int, rs *rankV2Ratios) int { +func (r *rankV2) getScoreByPriorities(dim int, rs *rankRatios) int { // For unbalanced state, // roughly speaking, as long as the diff is reduced, it is either better or not worse. // To distinguish the small regions, the one where the diff is reduced too little is defined as not worse, @@ -260,17 +268,17 @@ func (bs *balanceSolver) getScoreByPriorities(dim int, rs *rankV2Ratios) int { // * f: maxBetterRate < peersRate <= maxNotWorsenedRate ====> score == -1 // * g: peersRate > maxNotWorsenedRate ====> score == -2 - srcRate, dstRate := bs.cur.getExtremeLoad(dim) - srcPendingRate, dstPendingRate := bs.cur.getPendingLoad(dim) - peersRate := bs.cur.getPeersRateFromCache(dim) + srcRate, dstRate := r.cur.getExtremeLoad(dim) + srcPendingRate, dstPendingRate := r.cur.getPendingLoad(dim) + peersRate := r.cur.getPeersRateFromCache(dim) highRate, lowRate := srcRate, dstRate - topnHotPeer := bs.nthHotPeer[bs.cur.srcStore.GetID()][dim] + topnHotPeer := r.nthHotPeer[r.cur.srcStore.GetID()][dim] reverse := false if srcRate < dstRate { highRate, lowRate = dstRate, srcRate peersRate = -peersRate reverse = true - topnHotPeer = bs.nthHotPeer[bs.cur.dstStore.GetID()][dim] + topnHotPeer = r.nthHotPeer[r.cur.dstStore.GetID()][dim] } topnRate := math.MaxFloat64 if topnHotPeer != nil { @@ -301,8 +309,8 @@ func (bs *balanceSolver) getScoreByPriorities(dim int, rs *rankV2Ratios) int { // (highRate-maxNotWorsenedRate) / (lowRate+maxNotWorsenedRate) = futureChecker.balancedRatio maxNotWorsenedRate := (highRate - lowRate*rs.futureChecker.balancedRatio) / (1.0 + rs.futureChecker.balancedRatio) - if minNotWorsenedRate > -bs.getMinRate(dim) { // use min rate as 0 value - minNotWorsenedRate = -bs.getMinRate(dim) + if minNotWorsenedRate > -r.getMinRate(dim) { // use min rate as 0 value + minNotWorsenedRate = -r.getMinRate(dim) } if peersRate >= minNotWorsenedRate && peersRate <= maxNotWorsenedRate { @@ -357,8 +365,8 @@ func (bs *balanceSolver) getScoreByPriorities(dim int, rs *rankV2Ratios) int { minNotWorsenedRate = (highRate*rs.futureChecker.preBalancedRatio - lowRate) / (1.0 + rs.futureChecker.preBalancedRatio) // (highRate-maxNotWorsenedRate) / (lowRate+maxNotWorsenedRate) = futureChecker.preBalancedRatio maxNotWorsenedRate = (highRate - lowRate*rs.futureChecker.preBalancedRatio) / (1.0 + rs.futureChecker.preBalancedRatio) - if minNotWorsenedRate > -bs.getMinRate(dim) { // use min rate as 0 value - minNotWorsenedRate = -bs.getMinRate(dim) + if minNotWorsenedRate > -r.getMinRate(dim) { // use min rate as 0 value + minNotWorsenedRate = -r.getMinRate(dim) } // When approaching the balanced state, wait for pending influence to zero before scheduling to reduce jitter. // From pre-balanced state to balanced state, we don't need other more schedule. 
@@ -401,13 +409,13 @@ func (bs *balanceSolver) getScoreByPriorities(dim int, rs *rankV2Ratios) int { maxBetterRate = maxBalancedRate + rs.perceivedRatio*(highRate-lowRate-maxBalancedRate-minBetterRate) maxNotWorsenedRate = maxBalancedRate + rs.perceivedRatio*(highRate-lowRate-maxBalancedRate-minNotWorsenedRate) - minNotWorsenedRate = -bs.getMinRate(dim) // use min rate as 0 value + minNotWorsenedRate = -r.getMinRate(dim) // use min rate as 0 value } switch { case minBetterRate <= peersRate && peersRate <= maxBetterRate: // Positive score requires some restrictions. - if peersRate >= bs.getMinRate(dim) && bs.isTolerance(dim, reverse) && + if peersRate >= r.getMinRate(dim) && r.isTolerance(dim, reverse) && (!pendingRateLimit || math.Abs(srcPendingRate)+math.Abs(dstPendingRate) < 1 /*byte*/) { // avoid with pending influence when approaching the balanced state switch { case peersRate < minBalancedRate: @@ -428,44 +436,44 @@ func (bs *balanceSolver) getScoreByPriorities(dim int, rs *rankV2Ratios) int { } } -// betterThan checks if `bs.cur` is a better solution than `old`. -func (bs *balanceSolver) betterThanV2(old *solution) bool { - if old == nil || bs.cur.progressiveRank <= splitProgressiveRank { +// betterThan checks if `r.cur` is a better solution than `old`. +func (r *rankV2) betterThan(old *solution) bool { + if old == nil || r.cur.progressiveRank <= splitProgressiveRank { return true } - if bs.cur.progressiveRank != old.progressiveRank { + if r.cur.progressiveRank != old.progressiveRank { // Smaller rank is better. - return bs.cur.progressiveRank < old.progressiveRank + return r.cur.progressiveRank < old.progressiveRank } - if (bs.cur.revertRegion == nil) != (old.revertRegion == nil) { + if (r.cur.revertRegion == nil) != (old.revertRegion == nil) { // Fewer revertRegions are better. 
- return bs.cur.revertRegion == nil + return r.cur.revertRegion == nil } - if r := bs.compareSrcStore(bs.cur.srcStore, old.srcStore); r < 0 { + if r := r.compareSrcStore(r.cur.srcStore, old.srcStore); r < 0 { return true } else if r > 0 { return false } - if r := bs.compareDstStore(bs.cur.dstStore, old.dstStore); r < 0 { + if r := r.compareDstStore(r.cur.dstStore, old.dstStore); r < 0 { return true } else if r > 0 { return false } - if bs.cur.mainPeerStat != old.mainPeerStat { + if r.cur.mainPeerStat != old.mainPeerStat { // We will firstly consider ensuring converge faster, secondly reduce oscillation - if bs.resourceTy == writeLeader { - return getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, - bs.cur.getPeersRateFromCache(bs.firstPriority), old.getPeersRateFromCache(bs.firstPriority)) > 0 + if r.resourceTy == writeLeader { + return getRkCmpByPriority(r.firstPriority, r.cur.firstScore, old.firstScore, + r.cur.getPeersRateFromCache(r.firstPriority), old.getPeersRateFromCache(r.firstPriority)) > 0 } - firstCmp := getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, - bs.cur.getPeersRateFromCache(bs.firstPriority), old.getPeersRateFromCache(bs.firstPriority)) - secondCmp := getRkCmpByPriorityV2(bs.secondPriority, bs.cur.secondScore, old.secondScore, - bs.cur.getPeersRateFromCache(bs.secondPriority), old.getPeersRateFromCache(bs.secondPriority)) - switch bs.cur.progressiveRank { + firstCmp := getRkCmpByPriority(r.firstPriority, r.cur.firstScore, old.firstScore, + r.cur.getPeersRateFromCache(r.firstPriority), old.getPeersRateFromCache(r.firstPriority)) + secondCmp := getRkCmpByPriority(r.secondPriority, r.cur.secondScore, old.secondScore, + r.cur.getPeersRateFromCache(r.secondPriority), old.getPeersRateFromCache(r.secondPriority)) + switch r.cur.progressiveRank { case -4, -3, -2: // firstPriority if firstCmp != 0 { return firstCmp > 0 @@ -482,7 +490,7 @@ func (bs *balanceSolver) betterThanV2(old *solution) bool { return false } -func getRkCmpByPriorityV2(dim int, curScore, oldScore int, curPeersRate, oldPeersRate float64) int { +func getRkCmpByPriority(dim int, curScore, oldScore int, curPeersRate, oldPeersRate float64) int { switch { case curScore > oldScore: return 1 @@ -500,16 +508,16 @@ func getRkCmpByPriorityV2(dim int, curScore, oldScore int, curPeersRate, oldPeer } } -func (bs *balanceSolver) rankToDimStringV2() string { - switch bs.cur.progressiveRank { +func (r *rankV2) rankToDimString() string { + switch r.cur.progressiveRank { case -4: return "all" case -3: - return utils.DimToString(bs.firstPriority) + return utils.DimToString(r.firstPriority) case -2: - return utils.DimToString(bs.firstPriority) + "-only" + return utils.DimToString(r.firstPriority) + "-only" case -1: - return utils.DimToString(bs.secondPriority) + return utils.DimToString(r.secondPriority) default: return "none" } diff --git a/pkg/schedule/schedulers/hot_region_v2_test.go b/pkg/schedule/schedulers/hot_region_rank_v2_test.go similarity index 100% rename from pkg/schedule/schedulers/hot_region_v2_test.go rename to pkg/schedule/schedulers/hot_region_rank_v2_test.go diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 304698c915e2..4317c3b9c7f3 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -2663,7 +2663,7 @@ func TestExpect(t *testing.T) { hb, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder("hot-region", 
nil)) re.NoError(err) testCases := []struct { - initFunc func(*balanceSolver) + rankVersion string strict bool isSrc bool allow bool @@ -2674,8 +2674,8 @@ func TestExpect(t *testing.T) { }{ // test src, it will be allowed when loads are higher than expect { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: true, // all of + rankVersion: "v1", + strict: true, // all of load: &statistics.StoreLoad{ // all dims are higher than expect, allow schedule Loads: []float64{2.0, 2.0, 2.0}, HistoryLoads: [][]float64{{2.0, 2.0}, {2.0, 2.0}, {2.0, 2.0}}, @@ -2688,8 +2688,8 @@ func TestExpect(t *testing.T) { allow: true, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: true, // all of + rankVersion: "v1", + strict: true, // all of load: &statistics.StoreLoad{ // all dims are higher than expect, but lower than expect*toleranceRatio, not allow schedule Loads: []float64{2.0, 2.0, 2.0}, HistoryLoads: [][]float64{{2.0, 2.0}, {2.0, 2.0}, {2.0, 2.0}}, @@ -2703,8 +2703,8 @@ func TestExpect(t *testing.T) { allow: false, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: true, // all of + rankVersion: "v1", + strict: true, // all of load: &statistics.StoreLoad{ // only queryDim is lower, but the dim is no selected, allow schedule Loads: []float64{2.0, 2.0, 1.0}, HistoryLoads: [][]float64{{2.0, 2.0}, {2.0, 2.0}, {1.0, 1.0}}, @@ -2717,8 +2717,8 @@ func TestExpect(t *testing.T) { allow: true, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: true, // all of + rankVersion: "v1", + strict: true, // all of load: &statistics.StoreLoad{ // only keyDim is lower, and the dim is selected, not allow schedule Loads: []float64{2.0, 1.0, 2.0}, HistoryLoads: [][]float64{{2.0, 2.0}, {1.0, 1.0}, {1.0, 1.0}}, @@ -2731,8 +2731,8 @@ func TestExpect(t *testing.T) { allow: false, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: false, // first only + rankVersion: "v1", + strict: false, // first only load: &statistics.StoreLoad{ // keyDim is higher, and the dim is selected, allow schedule Loads: []float64{1.0, 2.0, 1.0}, HistoryLoads: [][]float64{{1.0, 1.0}, {2.0, 2.0}, {1.0, 1.0}}, @@ -2745,8 +2745,8 @@ func TestExpect(t *testing.T) { allow: true, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: false, // first only + rankVersion: "v1", + strict: false, // first only load: &statistics.StoreLoad{ // although byteDim is higher, the dim is not first, not allow schedule Loads: []float64{2.0, 1.0, 1.0}, HistoryLoads: [][]float64{{2.0, 1.0}, {1.0, 1.0}, {1.0, 1.0}}, @@ -2759,8 +2759,8 @@ func TestExpect(t *testing.T) { allow: false, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: false, // first only + rankVersion: "v1", + strict: false, // first only load: &statistics.StoreLoad{ // although queryDim is higher, the dim is no selected, not allow schedule Loads: []float64{1.0, 1.0, 2.0}, HistoryLoads: [][]float64{{1.0, 1.0}, {1.0, 1.0}, {2.0, 2.0}}, @@ -2773,8 +2773,8 @@ func TestExpect(t *testing.T) { allow: false, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: false, // first only + rankVersion: "v1", + strict: false, // first only load: &statistics.StoreLoad{ // all dims are lower than expect, not allow schedule Loads: []float64{1.0, 1.0, 1.0}, HistoryLoads: [][]float64{{1.0, 1.0}, {1.0, 1.0}, {1.0, 1.0}}, @@ -2787,9 +2787,9 @@ func TestExpect(t *testing.T) { allow: false, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: true, - rs: writeLeader, + rankVersion: "v1", + strict: true, + rs: writeLeader, load: 
&statistics.StoreLoad{ // only keyDim is higher, but write leader only consider the first priority Loads: []float64{1.0, 2.0, 1.0}, HistoryLoads: [][]float64{{1.0, 1.0}, {2.0, 2.0}, {2.0, 2.0}}, @@ -2802,9 +2802,9 @@ func TestExpect(t *testing.T) { allow: true, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: true, - rs: writeLeader, + rankVersion: "v1", + strict: true, + rs: writeLeader, load: &statistics.StoreLoad{ // although byteDim is higher, the dim is not first, not allow schedule Loads: []float64{2.0, 1.0, 1.0}, HistoryLoads: [][]float64{{2.0, 2.0}, {1.0, 1.0}, {2.0, 2.0}}, @@ -2817,9 +2817,9 @@ func TestExpect(t *testing.T) { allow: false, }, { - initFunc: (*balanceSolver).pickCheckPolicyV1, - strict: true, - rs: writeLeader, + rankVersion: "v1", + strict: true, + rs: writeLeader, load: &statistics.StoreLoad{ // history loads is not higher than the expected. Loads: []float64{2.0, 1.0, 1.0}, HistoryLoads: [][]float64{{2.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}}, @@ -2833,8 +2833,8 @@ func TestExpect(t *testing.T) { }, // v2 { - initFunc: (*balanceSolver).pickCheckPolicyV2, - strict: false, // any of + rankVersion: "v2", + strict: false, // any of load: &statistics.StoreLoad{ // keyDim is higher, and the dim is selected, allow schedule Loads: []float64{1.0, 2.0, 1.0}, HistoryLoads: [][]float64{{1.0, 1.0}, {2.0, 2.0}, {1.0, 1.0}}, @@ -2847,8 +2847,8 @@ func TestExpect(t *testing.T) { allow: true, }, { - initFunc: (*balanceSolver).pickCheckPolicyV2, - strict: false, // any of + rankVersion: "v2", + strict: false, // any of load: &statistics.StoreLoad{ // byteDim is higher, and the dim is selected, allow schedule Loads: []float64{2.0, 1.0, 1.0}, HistoryLoads: [][]float64{{2.0, 2.0}, {1.0, 1.0}, {1.0, 1.0}}, @@ -2861,8 +2861,8 @@ func TestExpect(t *testing.T) { allow: true, }, { - initFunc: (*balanceSolver).pickCheckPolicyV2, - strict: false, // any of + rankVersion: "v2", + strict: false, // any of load: &statistics.StoreLoad{ // although queryDim is higher, the dim is no selected, not allow schedule Loads: []float64{1.0, 1.0, 2.0}, HistoryLoads: [][]float64{{1.0, 1.0}, {1.0, 1.0}, {2.0, 2.0}}, @@ -2875,8 +2875,8 @@ func TestExpect(t *testing.T) { allow: false, }, { - initFunc: (*balanceSolver).pickCheckPolicyV2, - strict: false, // any of + rankVersion: "v2", + strict: false, // any of load: &statistics.StoreLoad{ // all dims are lower than expect, not allow schedule Loads: []float64{1.0, 1.0, 1.0}, HistoryLoads: [][]float64{{1.0, 1.0}, {1.0, 1.0}, {1.0, 1.0}}, @@ -2889,9 +2889,9 @@ func TestExpect(t *testing.T) { allow: false, }, { - initFunc: (*balanceSolver).pickCheckPolicyV2, - strict: true, - rs: writeLeader, + rankVersion: "v2", + strict: true, + rs: writeLeader, load: &statistics.StoreLoad{ // only keyDim is higher, but write leader only consider the first priority Loads: []float64{1.0, 2.0, 1.0}, HistoryLoads: [][]float64{{1.0, 1.0}, {2.0, 2.0}, {1.0, 1.0}}, @@ -2904,9 +2904,9 @@ func TestExpect(t *testing.T) { allow: true, }, { - initFunc: (*balanceSolver).pickCheckPolicyV2, - strict: true, - rs: writeLeader, + rankVersion: "v2", + strict: true, + rs: writeLeader, load: &statistics.StoreLoad{ // although byteDim is higher, the dim is not first, not allow schedule Loads: []float64{2.0, 1.0, 1.0}, HistoryLoads: [][]float64{{2.0, 2.0}, {1.0, 1.0}, {1.0, 1.0}}, @@ -2949,8 +2949,13 @@ func TestExpect(t *testing.T) { secondPriority: utils.ByteDim, resourceTy: testCase.rs, } + if testCase.rankVersion == "v1" { + bs.rank = initRankV1(bs) + } else { + bs.rank = initRankV2(bs, 
hb.(*hotScheduler).conf.GetGreatDecRatio()) + } + bs.sche.conf.StrictPickingStore = testCase.strict - testCase.initFunc(bs) re.Equal(testCase.allow, bs.checkSrcByPriorityAndTolerance(testCase.load, testCase.expect, toleranceRatio)) re.Equal(testCase.allow, bs.checkDstByPriorityAndTolerance(srcToDst(testCase.load), srcToDst(testCase.expect), toleranceRatio)) re.Equal(testCase.allow, bs.checkSrcHistoryLoadsByPriorityAndTolerance(testCase.load, testCase.expect, toleranceRatio))
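
Note (not part of the patch): the core of this refactor is replacing the per-version function fields on balanceSolver with a single rank interface chosen once in init() from GetRankFormulaVersion(). The following is a minimal, self-contained Go sketch of that dispatch pattern only; solver, rankV1, rankV2, and solution here are simplified stand-ins for illustration, not the actual PD types or method signatures.

package main

import "fmt"

// rank mirrors the idea of the interface extracted in the patch: each formula
// version supplies its own ranking behaviour behind a common method set.
type rank interface {
	calcProgressiveRank(cur *solution)
	betterThan(cur, old *solution) bool
	rankToDimString(cur *solution) string
}

// solution is a simplified stand-in for the scheduler's solution struct.
type solution struct{ progressiveRank int }

// rankV1 and rankV2 are trimmed-down stand-ins; the ranks they assign are
// placeholders, not the real formulas.
type rankV1 struct{}

func (*rankV1) calcProgressiveRank(cur *solution) { cur.progressiveRank = -3 }
func (*rankV1) betterThan(cur, old *solution) bool {
	return old == nil || cur.progressiveRank < old.progressiveRank
}
func (*rankV1) rankToDimString(*solution) string { return "v1-dim" }

type rankV2 struct{}

func (*rankV2) calcProgressiveRank(cur *solution) { cur.progressiveRank = -4 }
func (*rankV2) betterThan(cur, old *solution) bool {
	return old == nil || cur.progressiveRank < old.progressiveRank
}
func (*rankV2) rankToDimString(*solution) string { return "v2-dim" }

// solver mimics balanceSolver: it holds one rank implementation instead of a
// bundle of per-version function fields.
type solver struct {
	rank rank
	cur  *solution
	best *solution
}

// newSolver picks the rank implementation once, the way balanceSolver.init()
// switches on the configured rank formula version in the patch.
func newSolver(version string) *solver {
	s := &solver{cur: &solution{}}
	switch version {
	case "v1":
		s.rank = &rankV1{}
	default:
		s.rank = &rankV2{}
	}
	return s
}

// solveOnce shows the call sites: the solver never branches on the version,
// it just delegates to the chosen rank implementation.
func (s *solver) solveOnce() {
	s.rank.calcProgressiveRank(s.cur)
	if s.rank.betterThan(s.cur, s.best) {
		s.best = s.cur
	}
}

func main() {
	for _, v := range []string{"v1", "v2"} {
		s := newSolver(v)
		s.solveOnce()
		fmt.Println(v, s.rank.rankToDimString(s.best), s.best.progressiveRank)
	}
}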