From 1a121a924b887346fc5830fe7a1c839d9ea97b85 Mon Sep 17 00:00:00 2001
From: lhy1024
Date: Wed, 5 Jul 2023 14:02:00 +0800
Subject: [PATCH] check other full-width symbols

Signed-off-by: lhy1024
---
 Makefile                                       | 2 +-
 client/resource_group/controller/limiter.go    | 2 +-
 pkg/cgroup/cgroup.go                           | 4 ++--
 pkg/core/rangetree/range_tree_test.go          | 2 +-
 pkg/core/store_test.go                         | 2 +-
 pkg/schedule/labeler/rules.go                  | 2 +-
 pkg/schedule/operator/operator.go              | 2 +-
 pkg/statistics/buckets/hot_bucket_task_test.go | 6 +++---
 server/config/store_config_test.go             | 6 +++---
 9 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/Makefile b/Makefile
index f6879cf20d1..7b574f56fb6 100644
--- a/Makefile
+++ b/Makefile
@@ -242,7 +242,7 @@ test-tso-consistency: install-tools
 TASK_COUNT=1
 TASK_ID=1
 
-# The command should be used in daily CI，it will split some tasks to run parallel.
+# The command should be used in daily CI,it will split some tasks to run parallel.
 # It should retain report.xml,coverage,coverage.xml and package.list to analyze.
 test-with-cover-parallel: install-tools dashboard-ui split
 	@$(FAILPOINT_ENABLE)
diff --git a/client/resource_group/controller/limiter.go b/client/resource_group/controller/limiter.go
index 078894dbdec..a8038c6d8e5 100644
--- a/client/resource_group/controller/limiter.go
+++ b/client/resource_group/controller/limiter.go
@@ -185,7 +185,7 @@ func (r *Reservation) CancelAt(now time.Time) {
 
 // Reserve returns a Reservation that indicates how long the caller must wait before n events happen.
 // The Limiter takes this Reservation into account when allowing future events.
-// The returned Reservation’s OK() method returns false if wait duration exceeds deadline.
+// The returned Reservation's OK() method returns false if wait duration exceeds deadline.
 // Usage example:
 //
 //	r := lim.Reserve(time.Now(), 1)
diff --git a/pkg/cgroup/cgroup.go b/pkg/cgroup/cgroup.go
index 1406cffed0a..2a99d2fcd3d 100644
--- a/pkg/cgroup/cgroup.go
+++ b/pkg/cgroup/cgroup.go
@@ -234,10 +234,10 @@ func getCgroupDetails(mountInfoPath string, cRoot string, controller string) (mo
 	// It is possible that the controller mount and the cgroup path are not the same (both are relative to the NS root).
 	// So start with the mount and construct the relative path of the cgroup.
 	// To test:
-	// 1、start a docker to run unit test or tidb-server
+	// 1. start a docker to run unit test or tidb-server
 	// > docker run -it --cpus=8 --memory=8g --name test --rm ubuntu:18.04 bash
 	//
-	// 2、change the limit when the container is running
+	// 2. change the limit when the container is running
 	//	docker update --cpus=8
 	nsRelativePath := string(fields[3])
 	if !strings.Contains(nsRelativePath, "..") {
diff --git a/pkg/core/rangetree/range_tree_test.go b/pkg/core/rangetree/range_tree_test.go
index 6bbd6860989..29845cf0bca 100644
--- a/pkg/core/rangetree/range_tree_test.go
+++ b/pkg/core/rangetree/range_tree_test.go
@@ -100,7 +100,7 @@ func TestRingPutItem(t *testing.T) {
 	re.Len(bucketTree.GetOverlaps(newSimpleBucketItem([]byte("010"), []byte("110"))), 2)
 	re.Empty(bucketTree.GetOverlaps(newSimpleBucketItem([]byte("200"), []byte("300"))))
 
-	// test1: insert one key range, the old overlaps will retain like split buckets. 
+	// test1: insert one key range, the old overlaps will retain like split buckets.
 	// key range: [002,010],[010,090],[090,100],[100,200]
 	bucketTree.Update(newSimpleBucketItem([]byte("010"), []byte("090")))
 	re.Equal(4, bucketTree.Len())
diff --git a/pkg/core/store_test.go b/pkg/core/store_test.go
index f6f2518c241..be0fd0f9418 100644
--- a/pkg/core/store_test.go
+++ b/pkg/core/store_test.go
@@ -174,7 +174,7 @@ func TestLowSpaceScoreV2(t *testing.T) {
 		bigger: newStoreInfoWithAvailable(1, 10*units.GiB, 100*units.GiB, 1.5),
 		small:  newStoreInfoWithAvailable(2, 10*units.GiB, 100*units.GiB, 1.4),
 	}, {
-		// store1 and store2 has same capacity and regionSize(40g)
+		// store1 and store2 has same capacity and regionSize (40g)
 		// but store1 has less available space size
 		bigger: newStoreInfoWithAvailable(1, 60*units.GiB, 100*units.GiB, 1),
 		small:  newStoreInfoWithAvailable(2, 80*units.GiB, 100*units.GiB, 2),
diff --git a/pkg/schedule/labeler/rules.go b/pkg/schedule/labeler/rules.go
index c902fff8f66..3b50779d659 100644
--- a/pkg/schedule/labeler/rules.go
+++ b/pkg/schedule/labeler/rules.go
@@ -171,7 +171,7 @@ func (rule *LabelRule) expireBefore(t time.Time) bool {
 	return rule.minExpire.Before(t)
 }
 
-// initKeyRangeRulesFromLabelRuleData init and adjust []KeyRangeRule from `LabelRule.Data“
+// initKeyRangeRulesFromLabelRuleData init and adjust []KeyRangeRule from `LabelRule.Data`
 func initKeyRangeRulesFromLabelRuleData(data interface{}) ([]*KeyRangeRule, error) {
 	rules, ok := data.([]interface{})
 	if !ok {
diff --git a/pkg/schedule/operator/operator.go b/pkg/schedule/operator/operator.go
index f8df4a428dd..d841c785b59 100644
--- a/pkg/schedule/operator/operator.go
+++ b/pkg/schedule/operator/operator.go
@@ -129,7 +129,7 @@ func (o *Operator) String() string {
 	for i := range o.steps {
 		stepStrs[i] = o.steps[i].String()
 	}
-	s := fmt.Sprintf("%s {%s} (kind:%s, region:%v(%v, %v), createAt:%s, startAt:%s, currentStep:%v, size:%d, steps:[%s],timeout:[%s])",
+	s := fmt.Sprintf("%s {%s} (kind:%s, region:%v(%v, %v), createAt:%s, startAt:%s, currentStep:%v, size:%d, steps:[%s], timeout:[%s])",
 		o.desc, o.brief, o.kind, o.regionID, o.regionEpoch.GetVersion(), o.regionEpoch.GetConfVer(), o.GetCreateTime(),
 		o.GetStartTime(), atomic.LoadInt32(&o.currentStep), o.ApproximateSize, strings.Join(stepStrs, ", "), o.timeout.String())
 	if o.CheckSuccess() {
diff --git a/pkg/statistics/buckets/hot_bucket_task_test.go b/pkg/statistics/buckets/hot_bucket_task_test.go
index f2f28ef3d02..c825cfe5b86 100644
--- a/pkg/statistics/buckets/hot_bucket_task_test.go
+++ b/pkg/statistics/buckets/hot_bucket_task_test.go
@@ -68,7 +68,7 @@ func TestCheckBucketsTask(t *testing.T) {
 	ctx, cancelFn := context.WithCancel(context.Background())
 	defer cancelFn()
 	hotCache := NewBucketsCache(ctx)
-	// case1: add bucket successfully
+	// case1: add bucket successfully
 	buckets := newTestBuckets(1, 1, [][]byte{[]byte("10"), []byte("20"), []byte("30")}, 0)
 	task := NewCheckPeerTask(buckets)
 	re.True(hotCache.CheckAsync(task))
@@ -93,7 +93,7 @@ func TestCheckBucketsTask(t *testing.T) {
 	re.Len(item, 1)
 	re.Equal(-2, item[0].HotDegree)
 
-	// case3:add bucket successful and the hot degree should inherit from the old one. 
+	// case3:add bucket successful and the hot degree should inherit from the old one.
 	buckets = newTestBuckets(1, 1, [][]byte{[]byte("10"), []byte("20")}, 0)
 	task = NewCheckPeerTask(buckets)
 	re.True(hotCache.CheckAsync(task))
@@ -109,7 +109,7 @@ func TestCollectBucketStatsTask(t *testing.T) {
 	ctx, cancelFn := context.WithCancel(context.Background())
 	defer cancelFn()
 	hotCache := NewBucketsCache(ctx)
-	// case1: add bucket successfully
+	// case1: add bucket successfully
 	for i := uint64(0); i < 10; i++ {
 		buckets := convertToBucketTreeItem(newTestBuckets(i, 1, [][]byte{[]byte(strconv.FormatUint(i*10, 10)),
 			[]byte(strconv.FormatUint((i+1)*10, 10))}, 0))
diff --git a/server/config/store_config_test.go b/server/config/store_config_test.go
index 6342926b636..01eeae5e8e3 100644
--- a/server/config/store_config_test.go
+++ b/server/config/store_config_test.go
@@ -133,21 +133,21 @@ func TestMergeCheck(t *testing.T) {
 		mergeKeys: 200000,
 		pass:      true,
 	}, {
-		// case 2: the smallest region is 68MiB，it can't be merged again.
+		// case 2: the smallest region is 68MiB,it can't be merged again.
 		size:      144 + 20,
 		mergeSize: 20,
 		keys:      1440000 + 200000,
 		mergeKeys: 200000,
 		pass:      true,
 	}, {
-		// case 3: the smallest region is 50MiB，it can be merged again.
+		// case 3: the smallest region is 50MiB,it can be merged again.
 		size:      144 + 2,
 		mergeSize: 50,
 		keys:      1440000 + 20000,
 		mergeKeys: 500000,
 		pass:      false,
 	}, {
-		// case4: the smallest region is 51MiB，it can't be merged again.
+		// case4: the smallest region is 51MiB,it can't be merged again.
 		size:      144 + 3,
 		mergeSize: 50,
 		keys:      1440000 + 30000,