*: fix some typos (#8544)
ref #4399

Signed-off-by: lhy1024 <[email protected]>
lhy1024 authored Aug 19, 2024
1 parent 10cbdcf commit a766351
Showing 25 changed files with 60 additions and 60 deletions.
4 changes: 2 additions & 2 deletions client/pd_service_discovery.go
@@ -157,7 +157,7 @@ type pdServiceClient struct {
}

// NOTE: In the current implementation, the URL passed in is bound to have a scheme,
// because it is processed in `newPDServiceDiscovery`, and the url returned by etcd member owns the sheme.
// because it is processed in `newPDServiceDiscovery`, and the url returned by etcd member owns the scheme.
// When testing, the URL is also bound to have a scheme.
func newPDServiceClient(url, leaderURL string, conn *grpc.ClientConn, isLeader bool) ServiceClient {
cli := &pdServiceClient{
@@ -1074,7 +1074,7 @@ func (c *pdServiceDiscovery) updateServiceClient(members []*pdpb.Member, leader
leaderURL := pickMatchedURL(leader.GetClientUrls(), c.tlsCfg)
leaderChanged, err := c.switchLeader(leaderURL)
followerChanged := c.updateFollowers(members, leader.GetMemberId(), leaderURL)
// don't need to recreate balancer if no changess.
// don't need to recreate balancer if no changes.
if !followerChanged && !leaderChanged {
return err
}
2 changes: 1 addition & 1 deletion conf/config.toml
@@ -206,7 +206,7 @@

[keyspace]
## pre-alloc is used to pre-allocate keyspaces during pd bootstrap.
## Its value should be a list of strings, denotting the name of the keyspaces.
## Its value should be a list of strings, denoting the name of the keyspaces.
## Example:
## pre-alloc = ["admin", "user1", "user2"]
# pre-alloc = []
2 changes: 1 addition & 1 deletion pkg/autoscaling/calculation.go
@@ -431,7 +431,7 @@ func findBestGroupToScaleOut(strategy *Strategy, groups []*Plan, component Compo
},
}

// TODO: we can provide different senerios by using options and remove this kind of special judgement.
// TODO: we can provide different scenarios by using options and remove this kind of special judgement.
if component == TiKV {
group.Labels[filter.SpecialUseKey] = filter.SpecialUseHotRegion
}
16 changes: 8 additions & 8 deletions pkg/gc/safepoint_test.go
@@ -85,7 +85,7 @@ func TestGCSafePointUpdateCurrently(t *testing.T) {
func TestServiceGCSafePointUpdate(t *testing.T) {
re := require.New(t)
manager := NewSafePointManager(newGCStorage(), config.PDServerConfig{})
gcworkerServiceID := "gc_worker"
gcWorkerServiceID := "gc_worker"
cdcServiceID := "cdc"
brServiceID := "br"
cdcServiceSafePoint := uint64(10)
@@ -101,7 +101,7 @@ func TestServiceGCSafePointUpdate(t *testing.T) {
re.NoError(err)
re.True(updated)
// the service will init the service safepoint to 0(<10 for cdc) for gc_worker.
re.Equal(gcworkerServiceID, min.ServiceID)
re.Equal(gcWorkerServiceID, min.ServiceID)
}()

// update the safepoint for br to 15 should success
@@ -111,24 +111,24 @@ func TestServiceGCSafePointUpdate(t *testing.T) {
re.NoError(err)
re.True(updated)
// the service will init the service safepoint to 0(<10 for cdc) for gc_worker.
re.Equal(gcworkerServiceID, min.ServiceID)
re.Equal(gcWorkerServiceID, min.ServiceID)
}()

// update safepoint to 8 for gc_woker should be success
// update safepoint to 8 for gc_worker should be success
go func() {
defer wg.Done()
// update with valid ttl for gc_worker should be success.
min, updated, _ := manager.UpdateServiceGCSafePoint(gcworkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now())
min, updated, _ := manager.UpdateServiceGCSafePoint(gcWorkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now())
re.True(updated)
// the current min safepoint should be 8 for gc_worker(cdc 10)
re.Equal(gcWorkerSafePoint, min.SafePoint)
re.Equal(gcworkerServiceID, min.ServiceID)
re.Equal(gcWorkerServiceID, min.ServiceID)
}()

go func() {
defer wg.Done()
// update safepoint of gc_worker's service with ttl not infinity should be failed.
_, updated, err := manager.UpdateServiceGCSafePoint(gcworkerServiceID, 10000, 10, time.Now())
_, updated, err := manager.UpdateServiceGCSafePoint(gcWorkerServiceID, 10000, 10, time.Now())
re.Error(err)
re.False(updated)
}()
@@ -145,7 +145,7 @@ func TestServiceGCSafePointUpdate(t *testing.T) {
wg.Wait()
// update safepoint to 15(>10 for cdc) for gc_worker
gcWorkerSafePoint = uint64(15)
min, updated, err := manager.UpdateServiceGCSafePoint(gcworkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now())
min, updated, err := manager.UpdateServiceGCSafePoint(gcWorkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now())
re.NoError(err)
re.True(updated)
re.Equal(cdcServiceID, min.ServiceID)
2 changes: 1 addition & 1 deletion pkg/member/election_leader.go
@@ -21,7 +21,7 @@ import (
)

// ElectionLeader defines the common interface of the leader, which is the pdpb.Member
// for in PD/API service or the tsopb.Participant in the microserives.
// for in PD/API service or the tsopb.Participant in the micro services.
type ElectionLeader interface {
// GetListenUrls returns the listen urls
GetListenUrls() []string
2 changes: 1 addition & 1 deletion pkg/movingaverage/weight_allocator.go
@@ -20,7 +20,7 @@ package movingaverage
// WeightAllocator will divide these items into some segments whose number named as segNum which should great than 0.
// And the items at first segment will be assigned more weight that is `segNum` times that of item at last segment.
// If you want assign same weights, just input segNum as 1.
// If length is 10 and segNum is 3, it will make the weight arrry as [3,3,3,3,2,2,2,1,1,1],
// If length is 10 and segNum is 3, it will make the weight array as [3,3,3,3,2,2,2,1,1,1],
// and then uniform it : [3,3,3,3,2,2,2,1,1,1]/sum(arr)=arr/21,
// And the final weight is [0.143,0.143,0.143,0.143,0.095,0.095,0.095,0.047,0.047,0.047];
// If length is 10 and segNum is 1, the weight is [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1];
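An illustrative, self-contained sketch of the weight layout described in the comment above, assuming the simple split-with-remainder scheme implied by the worked example ([3,3,3,3,2,2,2,1,1,1]/21 for length 10 and segNum 3); this is not the code from pkg/movingaverage:

package sketch

import "fmt"

// weights splits length items into segNum segments, gives the first segment
// weight segNum down to weight 1 for the last segment, lets earlier segments
// absorb the remainder, and normalizes by the total.
func weights(length, segNum int) []float64 {
	base, rem := length/segNum, length%segNum
	raw := make([]float64, 0, length)
	sum := 0.0
	for seg := 0; seg < segNum; seg++ {
		size := base
		if seg < rem {
			size++
		}
		w := float64(segNum - seg)
		for i := 0; i < size; i++ {
			raw = append(raw, w)
			sum += w
		}
	}
	for i := range raw {
		raw[i] /= sum
	}
	return raw
}

func Example() {
	fmt.Println(weights(10, 3)) // ≈ [0.143 0.143 0.143 0.143 0.095 0.095 0.095 0.048 0.048 0.048]
	fmt.Println(weights(10, 1)) // [0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1]
}
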
2 changes: 1 addition & 1 deletion pkg/replication/replication_mode_test.go
@@ -409,7 +409,7 @@ func TestReplicateState(t *testing.T) {
rep.tickReplicateStatus()
assertLastData(t, replicator.lastData[1], "sync", stateID, nil)

// repliate state to new member
// replicate state to new member
replicator.memberIDs = append(replicator.memberIDs, 2, 3)
rep.tickReplicateStatus()
assertLastData(t, replicator.lastData[2], "sync", stateID, nil)
4 changes: 2 additions & 2 deletions pkg/schedule/checker/rule_checker.go
@@ -265,7 +265,7 @@ func (c *RuleChecker) replaceUnexpectedRulePeer(region *core.RegionInfo, rf *pla
minCount := uint64(math.MaxUint64)
for _, p := range region.GetPeers() {
count := c.record.getOfflineLeaderCount(p.GetStoreId())
checkPeerhealth := func() bool {
checkPeerHealth := func() bool {
if p.GetId() == peer.GetId() {
return true
}
@@ -274,7 +274,7 @@ }
}
return c.allowLeader(fit, p)
}
if minCount > count && checkPeerhealth() {
if minCount > count && checkPeerHealth() {
minCount = count
newLeader = p
}
2 changes: 1 addition & 1 deletion pkg/schedule/operator/operator.go
@@ -45,7 +45,7 @@ var (
EpochNotMatch CancelReasonType = "epoch not match"
// AlreadyExist is the cancel reason when the operator is running.
AlreadyExist CancelReasonType = "already exist"
// AdminStop is the cancel reason when the operator is stopped by adminer.
// AdminStop is the cancel reason when the operator is stopped by admin.
AdminStop CancelReasonType = "admin stop"
// NotInRunningState is the cancel reason when the operator is not in running state.
NotInRunningState CancelReasonType = "not in running state"
2 changes: 1 addition & 1 deletion pkg/schedule/operator/operator_controller_test.go
@@ -523,7 +523,7 @@ func (suite *operatorControllerTestSuite) TestCheckOperatorLightly() {
re.Nil(r)
re.Equal(reason, RegionNotFound)

// check failed because of verions of region epoch changed
// check failed because of versions of region epoch changed
cluster.PutRegion(target)
source.GetMeta().RegionEpoch = &metapb.RegionEpoch{ConfVer: 0, Version: 1}
r, reason = controller.checkOperatorLightly(ops[0])
18 changes: 9 additions & 9 deletions pkg/schedule/plan/plan.go
@@ -30,18 +30,18 @@ type Plan interface {
SetStatus(*Status)
}

// Summary is used to analyse plan simply.
// Summary is used to analyze plan simply.
// It will return the status of store.
type Summary func([]Plan) (map[uint64]Status, bool, error)

// Collector is a plan collector
// Collector is a plan collector.
type Collector struct {
basePlan Plan
unschedulablePlans []Plan
schedulablePlans []Plan
}

// NewCollector returns a new Collector
// NewCollector returns a new Collector.
func NewCollector(plan Plan) *Collector {
return &Collector{
basePlan: plan,
@@ -50,7 +50,7 @@ func NewCollector(plan Plan) *Collector {
}
}

// Collect is used to collect a new Plan and save it into PlanCollector
// Collect is used to collect a new Plan and save it into PlanCollector.
func (c *Collector) Collect(opts ...Option) {
if c == nil {
return
@@ -63,32 +63,32 @@ func (c *Collector) Collect(opts ...Option) {
}
}

// GetPlans returns all plans and the first part plans are schedulable
// GetPlans returns all plans and the first part plans are schedulable.
func (c *Collector) GetPlans() []Plan {
if c == nil {
return nil
}
return append(c.schedulablePlans, c.unschedulablePlans...)
}

// Option is to do some action for plan
// Option is to do some action for plan.
type Option func(plan Plan)

// SetStatus is used to set status for plan
// SetStatus is used to set status for plan.
func SetStatus(status *Status) Option {
return func(plan Plan) {
plan.SetStatus(status)
}
}

// SetResource is used to generate Resource for plan
// SetResource is used to generate Resource for plan.
func SetResource(resource any) Option {
return func(plan Plan) {
plan.SetResource(resource)
}
}

// SetResourceWithStep is used to generate Resource for plan
// SetResourceWithStep is used to generate Resource for plan.
func SetResourceWithStep(resource any, step int) Option {
return func(plan Plan) {
plan.SetResourceWithStep(resource, step)
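A hypothetical usage sketch of the Collector and its functional options shown in this hunk, based only on the signatures visible here; basePlan, store, and status are assumed placeholders, and the import path assumes the github.com/tikv/pd module layout:

package sketch

import "github.com/tikv/pd/pkg/schedule/plan"

// collectExample saves one plan into the collector with a resource and status
// attached, then returns all collected plans (schedulable ones first).
func collectExample(basePlan plan.Plan, store any, status *plan.Status) []plan.Plan {
	c := plan.NewCollector(basePlan)
	c.Collect(plan.SetResource(store), plan.SetStatus(status))
	return c.GetPlans()
}
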
2 changes: 1 addition & 1 deletion pkg/schedule/scatter/region_scatterer.go
@@ -437,7 +437,7 @@ func isSameDistribution(region *core.RegionInfo, targetPeers map[uint64]*metapb.

// selectNewPeer return the new peer which pick the fewest picked count.
// it keeps the origin peer if the origin store's pick count is equal the fewest pick.
// it can be diveded into three steps:
// it can be divided into three steps:
// 1. found the max pick count and the min pick count.
// 2. if max pick count equals min pick count, it means all store picked count are some, return the origin peer.
// 3. otherwise, select the store which pick count is the min pick count and pass all filter.
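A simplified sketch of the three steps listed above, operating on a plain storeID-to-pick-count map; the names are illustrative and the placement-filter check from step 3 is omitted:

package sketch

// leastPickedStore keeps the origin store when it is already among the least
// picked, and otherwise returns a store with the minimum pick count.
func leastPickedStore(pickCount map[uint64]uint64, originStoreID uint64) uint64 {
	// Step 1: find the max and min pick counts.
	first := true
	var minCount, maxCount uint64
	for _, c := range pickCount {
		if first || c < minCount {
			minCount = c
		}
		if first || c > maxCount {
			maxCount = c
		}
		first = false
	}
	// Step 2: every store was picked equally often (or the origin store is
	// already among the least picked), so keep the origin peer's store.
	if minCount == maxCount || pickCount[originStoreID] == minCount {
		return originStoreID
	}
	// Step 3: otherwise choose a store with the fewest picks (filters omitted).
	for id, c := range pickCount {
		if c == minCount {
			return id
		}
	}
	return originStoreID
}
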
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/evict_slow_store.go
@@ -94,7 +94,7 @@ func (conf *evictSlowStoreSchedulerConfig) evictStore() uint64 {
return conf.getStores()[0]
}

// readyForRecovery checks whether the last cpatured candidate is ready for recovery.
// readyForRecovery checks whether the last captured candidate is ready for recovery.
func (conf *evictSlowStoreSchedulerConfig) readyForRecovery() bool {
conf.RLock()
defer conf.RUnlock()
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/evict_slow_trend.go
@@ -147,7 +147,7 @@ func (conf *evictSlowTrendSchedulerConfig) lastCandidateCapturedSecs() uint64 {
return DurationSinceAsSecs(conf.lastEvictCandidate.captureTS)
}

// readyForRecovery checks whether the last cpatured candidate is ready for recovery.
// readyForRecovery checks whether the last captured candidate is ready for recovery.
func (conf *evictSlowTrendSchedulerConfig) readyForRecovery() bool {
conf.RLock()
defer conf.RUnlock()
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/metrics.go
@@ -110,7 +110,7 @@ var (
Namespace: "pd",
Subsystem: "scheduler",
Name: "store_slow_trend_evicted_status",
Help: "Store evited by slow trend status for schedule",
Help: "Store evicted by slow trend status for schedule",
}, []string{"address", "store"})

storeSlowTrendActionStatusGauge = prometheus.NewGaugeVec(
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/utils.go
@@ -286,7 +286,7 @@ func sliceLoadCmp(cmps ...storeLoadCmp) storeLoadCmp {
}

// stLdRankCmp returns a cmp that compares the two loads with discretized data.
// For example, if the rank function discretice data by step 10 , the load 11 and 19 will be considered equal.
// For example, if the rank function discretize data by step 10 , the load 11 and 19 will be considered equal.
func stLdRankCmp(dim func(ld *statistics.StoreLoad) float64, rank func(value float64) int64) storeLoadCmp {
return func(ld1, ld2 *statistics.StoreLoad) int {
return rankCmp(dim(ld1), dim(ld2), rank)
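For reference, one possible rank function of the kind described above: it discretizes loads by a fixed step, so with step 10 the loads 11 and 19 both fall into rank 1 and compare as equal. This is illustrative only, not the rank function PD actually passes in:

package sketch

// stepRank returns a rank function that buckets load values by a fixed step.
func stepRank(step float64) func(float64) int64 {
	return func(value float64) int64 {
		return int64(value / step)
	}
}
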
2 changes: 1 addition & 1 deletion scripts/dashboard-version
@@ -1,3 +1,3 @@
# This file is updated by running scripts/update-dashboard.sh
# Don't edit it manullay
# Don't edit it manually
8.3.0-e6e78c7c
8 changes: 4 additions & 4 deletions scripts/update-dashboard.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail

CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
BASE_DIR="$(dirname "$CUR_DIR")"
DASHBOARD_VERSION_FILE="$BASE_DIR/scripts/dashboard-version"
# old version
@@ -23,9 +23,9 @@ if [ "$#" -ge 1 ]; then
# so that we don't need to modify the embed-dashboard-ui.sh logic
TO_FILE_VERSION=${DASHBOARD_VERSION#v}

echo "# This file is updated by running scripts/update-dashboard.sh" > $DASHBOARD_VERSION_FILE
echo "# Don't edit it manullay" >> $DASHBOARD_VERSION_FILE
echo $TO_FILE_VERSION >> $DASHBOARD_VERSION_FILE
echo "# This file is updated by running scripts/update-dashboard.sh" >$DASHBOARD_VERSION_FILE
echo "# Don't edit it manually" >>$DASHBOARD_VERSION_FILE
echo $TO_FILE_VERSION >>$DASHBOARD_VERSION_FILE
fi

echo "+ Update dashboard version to $DASHBOARD_VERSION"
2 changes: 1 addition & 1 deletion server/api/middleware.go
@@ -46,7 +46,7 @@ func (s *serviceMiddlewareBuilder) createHandler(next func(http.ResponseWriter,
return negroni.New(append(s.handlers, negroni.WrapFunc(next))...)
}

// requestInfoMiddleware is used to gather info from requsetInfo
// requestInfoMiddleware is used to gather info from requestInfo
type requestInfoMiddleware struct {
svr *server.Server
}
4 changes: 2 additions & 2 deletions server/cluster/cluster_test.go
@@ -952,8 +952,8 @@ func TestRegionHeartbeat(t *testing.T) {
re.NoError(cluster.processRegionHeartbeat(ctx, overlapRegion))
tracer.OnAllStageFinished()
re.Condition(func() bool {
fileds := tracer.LogFields()
return slice.AllOf(fileds, func(i int) bool { return fileds[i].Integer > 0 })
fields := tracer.LogFields()
return slice.AllOf(fields, func(i int) bool { return fields[i].Integer > 0 })
}, "should have stats")
region = &metapb.Region{}
ok, err = storage.LoadRegion(regions[n-1].GetID(), region)
4 changes: 2 additions & 2 deletions tests/server/apiv2/handlers/keyspace_test.go
@@ -109,7 +109,7 @@ func (suite *keyspaceTestSuite) TestUpdateKeyspaceState() {
success, disabledAgain := sendUpdateStateRequest(re, suite.server, created.Name, &handlers.UpdateStateParam{State: "disabled"})
re.True(success)
re.Equal(disabled, disabledAgain)
// Tombstoning a DISABLED keyspace should not be allowed.
// Tombstone a DISABLED keyspace should not be allowed.
success, _ = sendUpdateStateRequest(re, suite.server, created.Name, &handlers.UpdateStateParam{State: "tombstone"})
re.False(success)
// Archiving a DISABLED keyspace should be allowed.
@@ -119,7 +119,7 @@
// Enabling an ARCHIVED keyspace is not allowed.
success, _ = sendUpdateStateRequest(re, suite.server, created.Name, &handlers.UpdateStateParam{State: "enabled"})
re.False(success)
// Tombstoning an ARCHIVED keyspace is allowed.
// Tombstone an ARCHIVED keyspace is allowed.
success, tombstone := sendUpdateStateRequest(re, suite.server, created.Name, &handlers.UpdateStateParam{State: "tombstone"})
re.True(success)
re.Equal(keyspacepb.KeyspaceState_TOMBSTONE, tombstone.State)
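A small reading aid summarizing only the keyspace state transitions asserted in this test; it is not the authoritative transition table from the keyspace package:

package sketch

// allowed records just the state changes checked in TestUpdateKeyspaceState:
// DISABLED may move to ARCHIVED but not to TOMBSTONE, and ARCHIVED may move to
// TOMBSTONE but not back to ENABLED.
var allowed = map[string]map[string]bool{
	"DISABLED": {"ARCHIVED": true, "TOMBSTONE": false},
	"ARCHIVED": {"TOMBSTONE": true, "ENABLED": false},
}
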
2 changes: 1 addition & 1 deletion tests/server/cluster/cluster_test.go
@@ -827,7 +827,7 @@ func TestSetScheduleOpt(t *testing.T) {
re := require.New(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// TODO: enable placementrules
// TODO: enable placementRules
tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Replication.EnablePlacementRules = false })
defer tc.Destroy()
re.NoError(err)
12 changes: 6 additions & 6 deletions tools/pd-api-bench/cases/controller.go
@@ -231,10 +231,10 @@ func (c *httpController) run() {
case <-ticker.C:
err := c.Do(c.ctx, hCli)
if err != nil {
log.Error("meet erorr when doing HTTP request", zap.String("case", c.Name()), zap.Error(err))
log.Error("meet error when doing HTTP request", zap.String("case", c.Name()), zap.Error(err))
}
case <-c.ctx.Done():
log.Info("Got signal to exit running HTTP case")
log.Info("got signal to exit running HTTP case")
return
}
}
@@ -300,10 +300,10 @@ func (c *gRPCController) run() {
case <-ticker.C:
err := c.Unary(c.ctx, cli)
if err != nil {
log.Error("meet erorr when doing gRPC request", zap.String("case", c.Name()), zap.Error(err))
log.Error("meet error when doing gRPC request", zap.String("case", c.Name()), zap.Error(err))
}
case <-c.ctx.Done():
log.Info("Got signal to exit running gRPC case")
log.Info("got signal to exit running gRPC case")
return
}
}
@@ -374,10 +374,10 @@ func (c *etcdController) run() {
case <-ticker.C:
err := c.Unary(c.ctx, cli)
if err != nil {
log.Error("meet erorr when doing etcd request", zap.String("case", c.Name()), zap.Error(err))
log.Error("meet error when doing etcd request", zap.String("case", c.Name()), zap.Error(err))
}
case <-c.ctx.Done():
log.Info("Got signal to exit running etcd case")
log.Info("got signal to exit running etcd case")
return
}
}