scheduler: remove special logic about write leader in hot scheduler #9149

Draft: wants to merge 2 commits into base: master
66 changes: 18 additions & 48 deletions pkg/schedule/schedulers/hot_region.go
@@ -1175,30 +1175,15 @@ var dimToStep = [utils.DimLen]float64{
 // 3. if the first priority and second priority are equal, we pick the store with the smaller difference between current and future to minimize oscillations.
 func (bs *balanceSolver) compareSrcStore(detail1, detail2 *statistics.StoreLoadDetail) int {
     if detail1 != detail2 {
-        var lpCmp storeLPCmp
-        if bs.resourceTy == writeLeader {
-            lpCmp = sliceLPCmp(
-                minLPCmp(negLoadCmp(sliceLoadCmp(
-                    stLdRankCmp(stLdRate(bs.firstPriority), stepRank(bs.maxSrc.Loads[bs.firstPriority], bs.rankStep.Loads[bs.firstPriority])),
-                    stLdRankCmp(stLdRate(bs.secondPriority), stepRank(bs.maxSrc.Loads[bs.secondPriority], bs.rankStep.Loads[bs.secondPriority])),
-                ))),
-                diffCmp(sliceLoadCmp(
-                    stLdRankCmp(stLdCount, stepRank(0, bs.rankStep.Count)),
-                    stLdRankCmp(stLdRate(bs.firstPriority), stepRank(0, bs.rankStep.Loads[bs.firstPriority])),
-                    stLdRankCmp(stLdRate(bs.secondPriority), stepRank(0, bs.rankStep.Loads[bs.secondPriority])),
-                )),
-            )
-        } else {
-            lpCmp = sliceLPCmp(
-                minLPCmp(negLoadCmp(sliceLoadCmp(
-                    stLdRankCmp(stLdRate(bs.firstPriority), stepRank(bs.maxSrc.Loads[bs.firstPriority], bs.rankStep.Loads[bs.firstPriority])),
-                    stLdRankCmp(stLdRate(bs.secondPriority), stepRank(bs.maxSrc.Loads[bs.secondPriority], bs.rankStep.Loads[bs.secondPriority])),
-                ))),
-                diffCmp(
-                    stLdRankCmp(stLdRate(bs.firstPriority), stepRank(0, bs.rankStep.Loads[bs.firstPriority])),
-                ),
-            )
-        }
+        lpCmp := sliceLPCmp(
+            minLPCmp(negLoadCmp(sliceLoadCmp(
+                stLdRankCmp(stLdRate(bs.firstPriority), stepRank(bs.maxSrc.Loads[bs.firstPriority], bs.rankStep.Loads[bs.firstPriority])),
+                stLdRankCmp(stLdRate(bs.secondPriority), stepRank(bs.maxSrc.Loads[bs.secondPriority], bs.rankStep.Loads[bs.secondPriority])),
+            ))),
+            diffCmp(
+                stLdRankCmp(stLdRate(bs.firstPriority), stepRank(0, bs.rankStep.Loads[bs.firstPriority])),
+            ),
+        )
         return lpCmp(detail1.LoadPred, detail2.LoadPred)
     }
     return 0
@@ -1214,30 +1199,15 @@ func (bs *balanceSolver) compareSrcStore(detail1, detail2 *statistics.StoreLoadD
 // 3. if the first priority and second priority are equal, we pick the store with the smaller difference between current and future to minimize oscillations.
 func (bs *balanceSolver) compareDstStore(detail1, detail2 *statistics.StoreLoadDetail) int {
     if detail1 != detail2 {
-        // compare destination store
-        var lpCmp storeLPCmp
-        if bs.resourceTy == writeLeader {
-            lpCmp = sliceLPCmp(
-                maxLPCmp(sliceLoadCmp(
-                    stLdRankCmp(stLdRate(bs.firstPriority), stepRank(bs.minDst.Loads[bs.firstPriority], bs.rankStep.Loads[bs.firstPriority])),
-                    stLdRankCmp(stLdRate(bs.secondPriority), stepRank(bs.minDst.Loads[bs.secondPriority], bs.rankStep.Loads[bs.secondPriority])),
-                )),
-                diffCmp(sliceLoadCmp(
-                    stLdRankCmp(stLdCount, stepRank(0, bs.rankStep.Count)),
-                    stLdRankCmp(stLdRate(bs.firstPriority), stepRank(0, bs.rankStep.Loads[bs.firstPriority])),
-                    stLdRankCmp(stLdRate(bs.secondPriority), stepRank(0, bs.rankStep.Loads[bs.secondPriority])),
-                )))
-        } else {
-            lpCmp = sliceLPCmp(
-                maxLPCmp(sliceLoadCmp(
-                    stLdRankCmp(stLdRate(bs.firstPriority), stepRank(bs.minDst.Loads[bs.firstPriority], bs.rankStep.Loads[bs.firstPriority])),
-                    stLdRankCmp(stLdRate(bs.secondPriority), stepRank(bs.minDst.Loads[bs.secondPriority], bs.rankStep.Loads[bs.secondPriority])),
-                )),
-                diffCmp(
-                    stLdRankCmp(stLdRate(bs.firstPriority), stepRank(0, bs.rankStep.Loads[bs.firstPriority])),
-                ),
-            )
-        }
+        lpCmp := sliceLPCmp(
+            maxLPCmp(sliceLoadCmp(
+                stLdRankCmp(stLdRate(bs.firstPriority), stepRank(bs.minDst.Loads[bs.firstPriority], bs.rankStep.Loads[bs.firstPriority])),
+                stLdRankCmp(stLdRate(bs.secondPriority), stepRank(bs.minDst.Loads[bs.secondPriority], bs.rankStep.Loads[bs.secondPriority])),
+            )),
+            diffCmp(
+                stLdRankCmp(stLdRate(bs.firstPriority), stepRank(0, bs.rankStep.Loads[bs.firstPriority])),
+            ),
+        )
         return lpCmp(detail1.LoadPred, detail2.LoadPred)
     }
     return 0
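Side note on how these comparators are built, both the deleted write-leader variant and the one that remains: small functions such as stLdRankCmp quantize a single load dimension by a rank step before comparing, and sliceLoadCmp/sliceLPCmp chain them so the first non-zero result wins. Below is a minimal, self-contained sketch of that combinator style; the `load` type, the step semantics, and the function names are simplified stand-ins for illustration, not pd's actual statistics.StoreLoad or stepRank.

```go
package main

import (
	"fmt"
	"math"
)

// load is a hypothetical stand-in for a store's load, reduced to the two
// dimensions being compared.
type load struct{ byteRate, keyRate float64 }

type loadCmp func(a, b load) int

// rankCmp compares one dimension after bucketing it into steps, so values in
// the same step compare equal and small fluctuations do not flip the order
// (the role stLdRankCmp/stepRank play in the diff above, simplified).
func rankCmp(dim func(load) float64, step float64) loadCmp {
	return func(a, b load) int {
		ra, rb := math.Floor(dim(a)/step), math.Floor(dim(b)/step)
		switch {
		case ra > rb:
			return 1
		case ra < rb:
			return -1
		default:
			return 0
		}
	}
}

// chainCmp mirrors the sliceLoadCmp/sliceLPCmp idea: run comparators in
// priority order and return the first non-zero result.
func chainCmp(cmps ...loadCmp) loadCmp {
	return func(a, b load) int {
		for _, cmp := range cmps {
			if r := cmp(a, b); r != 0 {
				return r
			}
		}
		return 0
	}
}

func main() {
	byteRate := func(l load) float64 { return l.byteRate }
	keyRate := func(l load) float64 { return l.keyRate }
	cmp := chainCmp(rankCmp(byteRate, 100), rankCmp(keyRate, 10))
	a := load{byteRate: 950, keyRate: 40}
	b := load{byteRate: 920, keyRate: 55}
	// same 100-wide byte-rate bucket, so the key-rate dimension decides
	fmt.Println(cmp(a, b)) // -1
}
```

With the special case gone, compareSrcStore and compareDstStore build one such chain regardless of resource type, and the leader-count dimension (stLdCount) no longer appears in any comparison.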
21 changes: 0 additions & 21 deletions pkg/schedule/schedulers/hot_region_rank_v1.go
@@ -92,16 +92,6 @@ func (r *rankV1) calcProgressiveRank() {
         return
     }
 
-    if r.resourceTy == writeLeader {
-        // For write leader, only compare the first priority.
-        // If the first priority is better, the progressiveRank is 3.
-        // Because it is not a solution that needs to be optimized.
-        if r.isBetterForWriteLeader() {
-            r.cur.progressiveRank = 3
-        }
-        return
-    }
-
 isFirstBetter, isSecondBetter := r.isBetter(r.firstPriority), r.isBetter(r.secondPriority)
 isFirstNotWorsened := isFirstBetter || r.isNotWorsened(r.firstPriority)
 isSecondNotWorsened := isSecondBetter || r.isNotWorsened(r.secondPriority)
@@ -152,11 +142,6 @@ func (r *rankV1) betterThan(old *solution) bool {
     }
 
     if r.cur.mainPeerStat != old.mainPeerStat {
-        // compare region
-        if r.resourceTy == writeLeader {
-            return r.cur.getPeersRateFromCache(r.firstPriority) > old.getPeersRateFromCache(r.firstPriority)
-        }
-
         // We will firstly consider ensuring converge faster, secondly reduce oscillation
         firstCmp, secondCmp := r.getRkCmpPriorities(old)
         switch r.cur.progressiveRank {
Expand Down Expand Up @@ -217,12 +202,6 @@ func (*rankV1) needSearchRevertRegions() bool {

func (*rankV1) setSearchRevertRegions() {}

func (r *rankV1) isBetterForWriteLeader() bool {
srcRate, dstRate := r.cur.getExtremeLoad(r.firstPriority)
peersRate := r.cur.getPeersRateFromCache(r.firstPriority)
return srcRate-peersRate >= dstRate+peersRate && r.isTolerance(r.firstPriority, false)
}

func (r *rankV1) isBetter(dim int) bool {
isHot, decRatio := r.getHotDecRatioByPriorities(dim)
return isHot && decRatio <= r.greatDecRatio && r.isTolerance(dim, false)
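For reference, the deleted isBetterForWriteLeader gated a write-leader move on the inequality srcRate - peersRate >= dstRate + peersRate: after shifting the hot peer's rate, the source should still carry at least as much load as the destination, so the move does not overshoot and invert the imbalance. A tiny self-contained sketch of just that inequality with hypothetical numbers (the accompanying isTolerance check is omitted):

```go
package main

import "fmt"

// wouldStillConverge mirrors the inequality in the removed
// isBetterForWriteLeader: moving a peer with rate peersRate from src to dst
// only counts as "better" if the source stays at or above the destination
// afterwards.
func wouldStillConverge(srcRate, dstRate, peersRate float64) bool {
	return srcRate-peersRate >= dstRate+peersRate
}

func main() {
	// hypothetical write rates
	fmt.Println(wouldStillConverge(1000, 400, 200)) // true:  800 >= 600
	fmt.Println(wouldStillConverge(1000, 400, 350)) // false: 650 < 750
}
```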
15 changes: 0 additions & 15 deletions pkg/schedule/schedulers/hot_region_rank_v2.go
@@ -206,16 +206,6 @@ func (r *rankV2) calcProgressiveRank() {
         return
     }
 
-    if r.resourceTy == writeLeader {
-        // For write leader, only compare the first priority.
-        // If the first priority is better, the progressiveRank is 3.
-        // Because it is not a solution that needs to be optimized.
-        if r.getScoreByPriorities(r.firstPriority, r.firstPriorityRatios) > 0 {
-            r.cur.progressiveRank = 3
-        }
-        return
-    }
-
 firstScore := r.getScoreByPriorities(r.firstPriority, r.firstPriorityRatios)
 secondScore := r.getScoreByPriorities(r.secondPriority, r.secondPriorityRatios)
 r.cur.firstScore, r.cur.secondScore = firstScore, secondScore
@@ -463,11 +453,6 @@ func (r *rankV2) betterThan(old *solution) bool {
 
     if r.cur.mainPeerStat != old.mainPeerStat {
         // We will firstly consider ensuring converge faster, secondly reduce oscillation
-        if r.resourceTy == writeLeader {
-            return getRkCmpByPriority(r.firstPriority, r.cur.firstScore, old.firstScore,
-                r.cur.getPeersRateFromCache(r.firstPriority), old.getPeersRateFromCache(r.firstPriority)) > 0
-        }
-
         firstCmp := getRkCmpByPriority(r.firstPriority, r.cur.firstScore, old.firstScore,
             r.cur.getPeersRateFromCache(r.firstPriority), old.getPeersRateFromCache(r.firstPriority))
         secondCmp := getRkCmpByPriority(r.secondPriority, r.cur.secondScore, old.secondScore,
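To make the behavioral change concrete: in both rank v1 and rank v2 the removed branch rated a write-leader solution on its first priority alone, setting progressiveRank to 3 when that dimension improved, while the surviving path scores both priorities. The sketch below is a stripped-down illustration, not pd's code: plain ints stand in for the solution and score types, and rankFromBothScores is only a placeholder for the shared logic, not the real rank mapping or tolerance checks.

```go
package main

import "fmt"

// rankBeforePR reflects the removed special case: for the write-leader
// resource type only the first-priority score was consulted, and an
// improvement was rated progressiveRank = 3 outright.
func rankBeforePR(isWriteLeader bool, firstScore, secondScore int) int {
	if isWriteLeader {
		if firstScore > 0 {
			return 3
		}
		return 0 // the real code leaves the rank unchanged; 0 is just for the sketch
	}
	return rankFromBothScores(firstScore, secondScore)
}

// rankAfterPR is the unified path this PR keeps: write leader is scored on
// both priorities exactly like the other resource types.
func rankAfterPR(firstScore, secondScore int) int {
	return rankFromBothScores(firstScore, secondScore)
}

// rankFromBothScores is only a placeholder for the shared scoring in
// hot_region_rank_v1.go / hot_region_rank_v2.go.
func rankFromBothScores(firstScore, secondScore int) int {
	switch {
	case firstScore > 0 && secondScore > 0:
		return 3
	case firstScore > 0 && secondScore == 0:
		return 2
	case firstScore > 0:
		return 1
	default:
		return 0
	}
}

func main() {
	// A solution that improves the first priority but worsens the second:
	// the old write-leader branch rated it 3; on the unified path the second
	// priority now participates in the rank.
	fmt.Println(rankBeforePR(true, 1, -1)) // 3
	fmt.Println(rankAfterPR(1, -1))        // 1 (placeholder mapping)
}
```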
4 changes: 0 additions & 4 deletions pkg/schedule/schedulers/utils.go
@@ -260,10 +260,6 @@ func stLdRate(dim int) func(ld *statistics.StoreLoad) float64 {
     }
 }
 
-func stLdCount(ld *statistics.StoreLoad) float64 {
-    return ld.Count
-}
-
 type storeLoadCmp func(ld1, ld2 *statistics.StoreLoad) int
 
 // negLoadCmp returns a cmp that returns the negation of cmps.