diff --git a/.golangci.yml b/.golangci.yml
index 0e5028634ae..283de8e96b0 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -13,6 +13,7 @@ linters:
     - bodyclose
     - testifylint
     - gofmt
+    - revive
   disable:
     - errcheck
 linters-settings:
@@ -52,3 +53,149 @@ linters-settings:
     rewrite-rules:
       - pattern: "interface{}"
        replacement: "any"
+  revive:
+    ignore-generated-header: false
+    severity: error
+    confidence: 0.8
+    rules:
+      - name: atomic
+        severity: warning
+        exclude: [""]
+        disabled: false
+      - name: blank-imports
+        severity: warning
+        exclude: [""]
+        disabled: false
+      - name: confusing-naming
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: confusing-results
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: context-as-argument
+        severity: warning
+        disabled: false
+        exclude: [""]
+        arguments:
+          - allowTypesBefore: "*testing.T,*github.com/user/repo/testing.Harness"
+      - name: datarace
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: defer
+        severity: warning
+        disabled: false
+        exclude: [""]
+        arguments:
+          - ["call-chain", "loop"]
+      - name: dot-imports
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: duplicated-imports
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: empty-block
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: empty-lines
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: error-return
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: error-strings
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: error-naming
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: exported
+        severity: warning
+        disabled: false
+        exclude: [""]
+        arguments:
+          - "checkPrivateReceivers"
+          - "sayRepetitiveInsteadOfStutters"
+      - name: identical-branches
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: if-return
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: modifies-parameter
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: optimize-operands-order
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: package-comments
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: range
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: range-val-in-closure
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: range-val-address
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: receiver-naming
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: indent-error-flow
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: superfluous-else
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: unnecessary-stmt
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: unreachable-code
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: unused-parameter
+        severity: warning
+        disabled: false
+        exclude: [""]
+        arguments:
+          - allowRegex: "^_"
+      - name: unused-receiver
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: useless-break
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: var-naming
+        severity: warning
+        disabled: false
+        exclude: [""]
+      - name: waitgroup-by-value
+        severity: warning
+        disabled: false
+        exclude: [""]
diff --git a/Makefile b/Makefile
index 0d02189f508..205896c377a 100644
--- a/Makefile
+++ b/Makefile
@@ -80,7 +80,7 @@ BUILD_BIN_PATH := $(ROOT_PATH)/bin
 
 build: pd-server pd-ctl pd-recover
 
-tools: pd-tso-bench pd-heartbeat-bench regions-dump stores-dump pd-api-bench
+tools: pd-tso-bench pd-heartbeat-bench regions-dump stores-dump pd-api-bench pd-ut
 
 PD_SERVER_DEP :=
 ifeq ($(SWAGGER), 1)
@@ -108,7 +108,6 @@ pd-server-basic:
 .PHONY: pre-build build tools pd-server pd-server-basic
 
 # Tools
-
 pd-ctl:
 	cd tools && GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_TOOL_CGO_ENABLED) go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/pd-ctl pd-ctl/main.go
 pd-tso-bench:
@@ -127,8 +126,12 @@ regions-dump:
 	cd tools && CGO_ENABLED=0 go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/regions-dump regions-dump/main.go
 stores-dump:
 	cd tools && CGO_ENABLED=0 go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/stores-dump stores-dump/main.go
+pd-ut: pd-xprog
+	cd tools && GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_TOOL_CGO_ENABLED) go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/pd-ut pd-ut/ut.go
+pd-xprog:
+	cd tools && GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_TOOL_CGO_ENABLED) go build -tags xprog -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/xprog pd-ut/xprog.go
 
-.PHONY: pd-ctl pd-tso-bench pd-recover pd-analysis pd-heartbeat-bench simulator regions-dump stores-dump pd-api-bench
+.PHONY: pd-ctl pd-tso-bench pd-recover pd-analysis pd-heartbeat-bench simulator regions-dump stores-dump pd-api-bench pd-ut
 
 #### Docker image ####
@@ -181,9 +184,6 @@ static: install-tools pre-build
 	@ gofmt -s -l -d $(PACKAGE_DIRECTORIES) 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }'
 	@ echo "golangci-lint ..."
 	@ golangci-lint run --verbose $(PACKAGE_DIRECTORIES) --allow-parallel-runners
-	@ echo "revive ..."
-	@ revive -formatter friendly -config revive.toml $(PACKAGES)
-
 	@ for mod in $(SUBMODULES); do cd $$mod && $(MAKE) static && cd $(ROOT_PATH) > /dev/null; done
 
 # Because CI downloads the dashboard code and runs gofmt, we can't add this check into static now.
@@ -225,6 +225,12 @@ failpoint-disable: install-tools
 
 #### Test ####
 
+ut: pd-ut
+	@$(FAILPOINT_ENABLE)
+	./bin/pd-ut run --race
+	@$(CLEAN_UT_BINARY)
+	@$(FAILPOINT_DISABLE)
+
 PACKAGE_DIRECTORIES := $(subst $(PD_PKG)/,,$(PACKAGES))
 TEST_PKGS := $(filter $(shell find . -iname "*_test.go" -exec dirname {} \; | \
 	sort -u | sed -e "s/^\./github.com\/tikv\/pd/"),$(PACKAGES))
@@ -303,6 +309,8 @@ split:
 
 clean: failpoint-disable clean-test clean-build
 
+CLEAN_UT_BINARY := find . -name '*.test.bin'| xargs rm -f
+
 clean-test:
 	# Cleaning test tmp...
 	rm -rf /tmp/test_pd*
@@ -310,6 +318,7 @@ clean-test:
 	rm -rf /tmp/test_etcd*
 	rm -f $(REAL_CLUSTER_TEST_PATH)/playground.log
 	go clean -testcache
+	@$(CLEAN_UT_BINARY)
 
 clean-build:
 	# Cleaning building files...
diff --git a/client/Makefile b/client/Makefile
index 3328bfe8d11..3e8f6b0d383 100644
--- a/client/Makefile
+++ b/client/Makefile
@@ -45,8 +45,6 @@ static: install-tools
 	@ gofmt -s -l -d . 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }'
 	@ echo "golangci-lint ..."
 	@ golangci-lint run -c ../.golangci.yml --verbose ./... --allow-parallel-runners
-	@ echo "revive ..."
-	@ revive -formatter friendly -config ../revive.toml ./...
 
 tidy:
 	@ go mod tidy
diff --git a/client/client.go b/client/client.go
index b9535aa504e..1852b77e4c6 100644
--- a/client/client.go
+++ b/client/client.go
@@ -69,16 +69,10 @@ type GlobalConfigItem struct {
 	PayLoad []byte
 }
 
-// Client is a PD (Placement Driver) RPC client.
-// It should not be used after calling Close().
-type Client interface {
-	// GetClusterID gets the cluster ID from PD.
-	GetClusterID(ctx context.Context) uint64
+// RPCClient is a PD (Placement Driver) RPC and related mcs client which can only call RPC.
+type RPCClient interface {
 	// GetAllMembers gets the members Info from PD
 	GetAllMembers(ctx context.Context) ([]*pdpb.Member, error)
-	// GetLeaderURL returns current leader's URL. It returns "" before
-	// syncing leader from server.
-	GetLeaderURL() string
 	// GetRegion gets a region and its leader Peer from PD by key.
 	// The region may expire after split. Caller is responsible for caching and
 	// taking care of region change.
@@ -133,17 +127,12 @@ type Client interface {
 	StoreGlobalConfig(ctx context.Context, configPath string, items []GlobalConfigItem) error
 	// WatchGlobalConfig returns a stream with all global config and updates
 	WatchGlobalConfig(ctx context.Context, configPath string, revision int64) (chan []GlobalConfigItem, error)
-	// UpdateOption updates the client option.
-	UpdateOption(option DynamicOption, value any) error
 	// GetExternalTimestamp returns external timestamp
 	GetExternalTimestamp(ctx context.Context) (uint64, error)
 	// SetExternalTimestamp sets external timestamp
 	SetExternalTimestamp(ctx context.Context, timestamp uint64) error
-	// GetServiceDiscovery returns ServiceDiscovery
-	GetServiceDiscovery() ServiceDiscovery
-
 	// TSOClient is the TSO client.
 	TSOClient
 	// MetaStorageClient is the meta storage client.
@@ -154,6 +143,24 @@ type Client interface {
 	GCClient
 	// ResourceManagerClient manages resource group metadata and token assignment.
 	ResourceManagerClient
+}
+
+// Client is a PD (Placement Driver) RPC client.
+// It should not be used after calling Close().
+type Client interface {
+	RPCClient
+
+	// GetClusterID gets the cluster ID from PD.
+	GetClusterID(ctx context.Context) uint64
+	// GetLeaderURL returns current leader's URL. It returns "" before
+	// syncing leader from server.
+	GetLeaderURL() string
+	// GetServiceDiscovery returns ServiceDiscovery
+	GetServiceDiscovery() ServiceDiscovery
+
+	// UpdateOption updates the client option.
+	UpdateOption(option DynamicOption, value any) error
+
 	// Close closes the client.
 	Close()
 }
@@ -431,12 +438,12 @@ func NewAPIContextV1() APIContext {
 }
 
 // GetAPIVersion returns the API version.
-func (apiCtx *apiContextV1) GetAPIVersion() (version APIVersion) {
+func (*apiContextV1) GetAPIVersion() (version APIVersion) {
 	return V1
 }
 
 // GetKeyspaceName returns the keyspace name.
-func (apiCtx *apiContextV1) GetKeyspaceName() (keyspaceName string) {
+func (*apiContextV1) GetKeyspaceName() (keyspaceName string) {
 	return ""
 }
 
@@ -453,7 +460,7 @@ func NewAPIContextV2(keyspaceName string) APIContext {
 }
 
 // GetAPIVersion returns the API version.
-func (apiCtx *apiContextV2) GetAPIVersion() (version APIVersion) {
+func (*apiContextV2) GetAPIVersion() (version APIVersion) {
 	return V2
 }
 
@@ -912,7 +919,7 @@ func handleRegionResponse(res *pdpb.GetRegionResponse) *Region {
 	return r
 }
 
-func (c *client) GetRegionFromMember(ctx context.Context, key []byte, memberURLs []string, opts ...GetRegionOption) (*Region, error) {
+func (c *client) GetRegionFromMember(ctx context.Context, key []byte, memberURLs []string, _ ...GetRegionOption) (*Region, error) {
 	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
 		span = span.Tracer().StartSpan("pdclient.GetRegionFromMember", opentracing.ChildOf(span.Context()))
 		defer span.Finish()
diff --git a/client/keyspace_client.go b/client/keyspace_client.go
index 340ecd0250e..e52a4f85f05 100644
--- a/client/keyspace_client.go
+++ b/client/keyspace_client.go
@@ -128,7 +128,7 @@ func (c *client) UpdateKeyspaceState(ctx context.Context, id uint32, state keysp
 // It returns a stream of slices of keyspace metadata.
 // The first message in stream contains all current keyspaceMeta,
 // all subsequent messages contains new put events for all keyspaces.
-func (c *client) WatchKeyspaces(ctx context.Context) (chan []*keyspacepb.KeyspaceMeta, error) {
+func (*client) WatchKeyspaces(context.Context) (chan []*keyspacepb.KeyspaceMeta, error) {
 	return nil, errors.Errorf("WatchKeyspaces unimplemented")
 }
 
diff --git a/client/mock_pd_service_discovery.go b/client/mock_pd_service_discovery.go
index 17613a2f9e4..f1fabd0a1d2 100644
--- a/client/mock_pd_service_discovery.go
+++ b/client/mock_pd_service_discovery.go
@@ -56,19 +56,19 @@ func (m *mockPDServiceDiscovery) GetAllServiceClients() []ServiceClient {
 	return m.clients
 }
 
-func (m *mockPDServiceDiscovery) GetClusterID() uint64 { return 0 }
-func (m *mockPDServiceDiscovery) GetKeyspaceID() uint32 { return 0 }
-func (m *mockPDServiceDiscovery) GetKeyspaceGroupID() uint32 { return 0 }
-func (m *mockPDServiceDiscovery) GetServiceURLs() []string { return nil }
-func (m *mockPDServiceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { return nil }
-func (m *mockPDServiceDiscovery) GetClientConns() *sync.Map { return nil }
-func (m *mockPDServiceDiscovery) GetServingURL() string { return "" }
-func (m *mockPDServiceDiscovery) GetBackupURLs() []string { return nil }
-func (m *mockPDServiceDiscovery) GetServiceClient() ServiceClient { return nil }
-func (m *mockPDServiceDiscovery) GetOrCreateGRPCConn(url string) (*grpc.ClientConn, error) {
+func (*mockPDServiceDiscovery) GetClusterID() uint64 { return 0 }
+func (*mockPDServiceDiscovery) GetKeyspaceID() uint32 { return 0 }
+func (*mockPDServiceDiscovery) GetKeyspaceGroupID() uint32 { return 0 }
+func (*mockPDServiceDiscovery) GetServiceURLs() []string { return nil }
+func (*mockPDServiceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { return nil }
+func (*mockPDServiceDiscovery) GetClientConns() *sync.Map { return nil }
+func (*mockPDServiceDiscovery) GetServingURL() string { return "" }
+func (*mockPDServiceDiscovery) GetBackupURLs() []string { return nil }
+func (*mockPDServiceDiscovery) GetServiceClient() ServiceClient { return nil }
+func (*mockPDServiceDiscovery) GetOrCreateGRPCConn(string) (*grpc.ClientConn, error) {
 	return nil, nil
 }
-func (m *mockPDServiceDiscovery) ScheduleCheckMemberChanged() {}
-func (m *mockPDServiceDiscovery) CheckMemberChanged() error { return nil }
-func (m *mockPDServiceDiscovery) AddServingURLSwitchedCallback(callbacks ...func()) {}
-func (m *mockPDServiceDiscovery) AddServiceURLsSwitchedCallback(callbacks ...func()) {}
+func (*mockPDServiceDiscovery) ScheduleCheckMemberChanged() {}
+func (*mockPDServiceDiscovery) CheckMemberChanged() error { return nil }
+func (*mockPDServiceDiscovery) AddServingURLSwitchedCallback(...func()) {}
+func (*mockPDServiceDiscovery) AddServiceURLsSwitchedCallback(...func()) {}
diff --git a/client/pd_service_discovery.go b/client/pd_service_discovery.go
index defb797b7ca..97e82ec3321 100644
--- a/client/pd_service_discovery.go
+++ b/client/pd_service_discovery.go
@@ -247,9 +247,9 @@ func (c *pdServiceClient) NeedRetry(pdErr *pdpb.Error, err error) bool {
 	return !(err == nil && pdErr == nil)
 }
 
-type errFn func(pdErr *pdpb.Error) bool
+type errFn func(*pdpb.Error) bool
 
-func emptyErrorFn(pdErr *pdpb.Error) bool {
+func emptyErrorFn(*pdpb.Error) bool {
 	return false
 }
 
@@ -618,7 +618,7 @@ func (c *pdServiceDiscovery) checkLeaderHealth(ctx context.Context) {
 }
 
 func (c *pdServiceDiscovery) checkFollowerHealth(ctx context.Context) {
-	c.followers.Range(func(key, value any) bool {
+	c.followers.Range(func(_, value any) bool {
 		// To ensure that the leader's healthy check is not delayed, shorten the duration.
 		ctx, cancel := context.WithTimeout(ctx, MemberHealthCheckInterval/3)
 		defer cancel()
@@ -661,7 +661,7 @@ func (c *pdServiceDiscovery) SetKeyspaceID(keyspaceID uint32) {
 }
 
 // GetKeyspaceGroupID returns the ID of the keyspace group
-func (c *pdServiceDiscovery) GetKeyspaceGroupID() uint32 {
+func (*pdServiceDiscovery) GetKeyspaceGroupID() uint32 {
 	// PD/API service only supports the default keyspace group
 	return defaultKeySpaceGroupID
 }
diff --git a/client/resource_group/controller/config.go b/client/resource_group/controller/config.go
index ffc360c385c..a4176c073cc 100644
--- a/client/resource_group/controller/config.go
+++ b/client/resource_group/controller/config.go
@@ -52,6 +52,10 @@ const (
 	defaultTargetPeriod = 5 * time.Second
 	// defaultMaxWaitDuration is the max duration to wait for the token before throwing error.
 	defaultMaxWaitDuration = 30 * time.Second
+	// defaultWaitRetryTimes is the number of times to retry when waiting for the token.
+	defaultWaitRetryTimes = 10
+	// defaultWaitRetryInterval is the interval between retries when waiting for the token.
+	defaultWaitRetryInterval = 50 * time.Millisecond
 )
 
 const (
@@ -85,6 +89,12 @@ type Config struct {
 	// LTBMaxWaitDuration is the max wait time duration for local token bucket.
 	LTBMaxWaitDuration Duration `toml:"ltb-max-wait-duration" json:"ltb-max-wait-duration"`
 
+	// WaitRetryInterval is the interval between retries when waiting for the token.
+	WaitRetryInterval Duration `toml:"wait-retry-interval" json:"wait-retry-interval"`
+
+	// WaitRetryTimes is the number of times to retry when waiting for the token.
+	WaitRetryTimes int `toml:"wait-retry-times" json:"wait-retry-times"`
+
 	// RequestUnit is the configuration determines the coefficients of the RRU and WRU cost.
 	// This configuration should be modified carefully.
 	RequestUnit RequestUnitConfig `toml:"request-unit" json:"request-unit"`
 
@@ -98,6 +108,8 @@ func DefaultConfig() *Config {
 	return &Config{
 		DegradedModeWaitDuration: NewDuration(defaultDegradedModeWaitDuration),
 		LTBMaxWaitDuration:       NewDuration(defaultMaxWaitDuration),
+		WaitRetryInterval:        NewDuration(defaultWaitRetryInterval),
+		WaitRetryTimes:           defaultWaitRetryTimes,
 		RequestUnit:              DefaultRequestUnitConfig(),
 		EnableControllerTraceLog: false,
 	}
@@ -155,6 +167,8 @@ type RUConfig struct {
 	// some config for client
 	LTBMaxWaitDuration time.Duration
+	WaitRetryInterval  time.Duration
+	WaitRetryTimes     int
 	DegradedModeWaitDuration time.Duration
 }
 
@@ -176,6 +190,8 @@ func GenerateRUConfig(config *Config) *RUConfig {
 		WriteBytesCost:           RequestUnit(config.RequestUnit.WriteCostPerByte),
 		CPUMsCost:                RequestUnit(config.RequestUnit.CPUMsCost),
 		LTBMaxWaitDuration:       config.LTBMaxWaitDuration.Duration,
+		WaitRetryInterval:        config.WaitRetryInterval.Duration,
+		WaitRetryTimes:           config.WaitRetryTimes,
 		DegradedModeWaitDuration: config.DegradedModeWaitDuration.Duration,
 	}
 }
diff --git a/client/resource_group/controller/controller.go b/client/resource_group/controller/controller.go
index a695aaf82bc..79bd6a9c3a6 100755
--- a/client/resource_group/controller/controller.go
+++ b/client/resource_group/controller/controller.go
@@ -39,8 +39,6 @@ import (
 
 const (
 	controllerConfigPath    = "resource_group/controller"
-	maxRetry                = 10
-	retryInterval           = 50 * time.Millisecond
 	maxNotificationChanLen  = 200
 	needTokensAmplification = 1.1
 	trickleReserveDuration  = 1250 * time.Millisecond
@@ -105,6 +103,20 @@ func WithMaxWaitDuration(d time.Duration) ResourceControlCreateOption {
 	}
 }
 
+// WithWaitRetryInterval is the option to set the retry interval when waiting for the token.
+func WithWaitRetryInterval(d time.Duration) ResourceControlCreateOption {
+	return func(controller *ResourceGroupsController) {
+		controller.ruConfig.WaitRetryInterval = d
+	}
+}
+
+// WithWaitRetryTimes is the option to set the number of times to retry when waiting for the token.
+func WithWaitRetryTimes(times int) ResourceControlCreateOption {
+	return func(controller *ResourceGroupsController) {
+		controller.ruConfig.WaitRetryTimes = times
+	}
+}
+
 var _ ResourceGroupKVInterceptor = (*ResourceGroupsController)(nil)
 
 // ResourceGroupsController implements ResourceGroupKVInterceptor.
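Reviewer note: the two options above slot into the client's existing functional-option pattern. A minimal usage sketch follows (not part of the patch; the NewResourceGroupsController argument list is an assumption based on the surrounding code, written as if inside the controller package):

    // Construct a controller with the new retry knobs overridden.
    func newControllerWithRetryKnobs(ctx context.Context, uniqueID uint64, provider ResourceGroupProvider) (*ResourceGroupsController, error) {
    	return NewResourceGroupsController(ctx, uniqueID, provider, nil,
    		WithWaitRetryInterval(100*time.Millisecond), // overrides defaultWaitRetryInterval (50ms)
    		WithWaitRetryTimes(20),                      // overrides defaultWaitRetryTimes (10)
    	)
    }
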
@@ -186,7 +198,7 @@ func loadServerConfig(ctx context.Context, provider ResourceGroupProvider) (*Con
 		log.Warn("[resource group controller] server does not save config, load config failed")
 		return DefaultConfig(), nil
 	}
-	config := &Config{}
+	config := DefaultConfig()
 	err = json.Unmarshal(kvs[0].GetValue(), config)
 	if err != nil {
 		return nil, err
@@ -367,7 +379,7 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
 				}
 				for _, item := range resp {
 					cfgRevision = item.Kv.ModRevision
-					config := &Config{}
+					config := DefaultConfig()
 					if err := json.Unmarshal(item.Kv.Value, config); err != nil {
 						continue
 					}
@@ -386,8 +398,7 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
 					}
 				case gc := <-c.tokenBucketUpdateChan:
-					now := gc.run.now
-					go gc.handleTokenBucketUpdateEvent(c.loopCtx, now)
+					go gc.handleTokenBucketUpdateEvent(c.loopCtx)
 				}
 			}
 		}()
@@ -461,7 +472,7 @@ func (c *ResourceGroupsController) cleanUpResourceGroup() {
 }
 
 func (c *ResourceGroupsController) executeOnAllGroups(f func(controller *groupCostController)) {
-	c.groupsController.Range(func(name, value any) bool {
+	c.groupsController.Range(func(_, value any) bool {
 		f(value.(*groupCostController))
 		return true
 	})
@@ -492,7 +503,7 @@ func (c *ResourceGroupsController) handleTokenBucketResponse(resp []*rmpb.TokenB
 
 func (c *ResourceGroupsController) collectTokenBucketRequests(ctx context.Context, source string, typ selectType) {
 	c.run.currentRequests = make([]*rmpb.TokenBucketRequest, 0)
-	c.groupsController.Range(func(name, value any) bool {
+	c.groupsController.Range(func(_, value any) bool {
 		gc := value.(*groupCostController)
 		request := gc.collectRequestAndConsumption(typ)
 		if request != nil {
@@ -844,7 +855,7 @@ func (gc *groupCostController) resetEmergencyTokenAcquisition() {
 	}
 }
 
-func (gc *groupCostController) handleTokenBucketUpdateEvent(ctx context.Context, now time.Time) {
+func (gc *groupCostController) handleTokenBucketUpdateEvent(ctx context.Context) {
 	switch gc.mode {
 	case rmpb.GroupMode_RawMode:
 		for _, counter := range gc.run.resourceTokens {
@@ -861,7 +872,7 @@ func (gc *groupCostController) handleTokenBucketUpdateEvent(ctx context.Context,
 				counter.notify.setupNotificationCh = nil
 				threshold := counter.notify.setupNotificationThreshold
 				counter.notify.mu.Unlock()
-				counter.limiter.SetupNotificationThreshold(now, threshold)
+				counter.limiter.SetupNotificationThreshold(threshold)
 			case <-ctx.Done():
 				return
 			}
@@ -882,7 +893,7 @@ func (gc *groupCostController) handleTokenBucketUpdateEvent(ctx context.Context,
 				counter.notify.setupNotificationCh = nil
 				threshold := counter.notify.setupNotificationThreshold
 				counter.notify.mu.Unlock()
-				counter.limiter.SetupNotificationThreshold(now, threshold)
+				counter.limiter.SetupNotificationThreshold(threshold)
 			case <-ctx.Done():
 				return
 			}
@@ -1206,7 +1217,7 @@ func (gc *groupCostController) onRequestWait(
 	var i int
 	var d time.Duration
 retryLoop:
-	for i = 0; i < maxRetry; i++ {
+	for i = 0; i < gc.mainCfg.WaitRetryTimes; i++ {
 		switch gc.mode {
 		case rmpb.GroupMode_RawMode:
 			res := make([]*Reservation, 0, len(requestResourceLimitTypeList))
@@ -1230,8 +1241,8 @@ func (gc *groupCostController) onRequestWait(
 			}
 		}
 		gc.requestRetryCounter.Inc()
-		time.Sleep(retryInterval)
-		waitDuration += retryInterval
+		time.Sleep(gc.mainCfg.WaitRetryInterval)
+		waitDuration += gc.mainCfg.WaitRetryInterval
 	}
 	if err != nil {
 		gc.failedRequestCounter.Inc()
diff --git a/client/resource_group/controller/limiter.go b/client/resource_group/controller/limiter.go
index 7e76934643f..230ad46ecf1 100644
--- a/client/resource_group/controller/limiter.go
+++ b/client/resource_group/controller/limiter.go
@@ -218,7 +218,7 @@ func (lim *Limiter) Reserve(ctx context.Context, waitDuration time.Duration, now
 }
 
 // SetupNotificationThreshold enables the notification at the given threshold.
-func (lim *Limiter) SetupNotificationThreshold(now time.Time, threshold float64) {
+func (lim *Limiter) SetupNotificationThreshold(threshold float64) {
 	lim.mu.Lock()
 	defer lim.mu.Unlock()
 	lim.notifyThreshold = threshold
diff --git a/client/resource_group/controller/model.go b/client/resource_group/controller/model.go
index dedc2ed7359..9e86de69abb 100644
--- a/client/resource_group/controller/model.go
+++ b/client/resource_group/controller/model.go
@@ -75,8 +75,7 @@ func newKVCalculator(cfg *RUConfig) *KVCalculator {
 }
 
 // Trickle ...
-func (kc *KVCalculator) Trickle(*rmpb.Consumption) {
-}
+func (*KVCalculator) Trickle(*rmpb.Consumption) {}
 
 // BeforeKVRequest ...
 func (kc *KVCalculator) BeforeKVRequest(consumption *rmpb.Consumption, req RequestInfo) {
@@ -166,11 +165,11 @@ func (dsc *SQLCalculator) Trickle(consumption *rmpb.Consumption) {
 }
 
 // BeforeKVRequest ...
-func (dsc *SQLCalculator) BeforeKVRequest(consumption *rmpb.Consumption, req RequestInfo) {
+func (*SQLCalculator) BeforeKVRequest(*rmpb.Consumption, RequestInfo) {
 }
 
 // AfterKVRequest ...
-func (dsc *SQLCalculator) AfterKVRequest(consumption *rmpb.Consumption, req RequestInfo, res ResponseInfo) {
+func (*SQLCalculator) AfterKVRequest(*rmpb.Consumption, RequestInfo, ResponseInfo) {
 }
 
 func getRUValueFromConsumption(custom *rmpb.Consumption, typ rmpb.RequestUnitType) float64 {
diff --git a/client/resource_group/controller/testutil.go b/client/resource_group/controller/testutil.go
index 4df8c9bba0d..01a9c3af1fc 100644
--- a/client/resource_group/controller/testutil.go
+++ b/client/resource_group/controller/testutil.go
@@ -52,7 +52,7 @@ func (tri *TestRequestInfo) StoreID() uint64 {
 }
 
 // ReplicaNumber implements the RequestInfo interface.
-func (tri *TestRequestInfo) ReplicaNumber() int64 {
+func (*TestRequestInfo) ReplicaNumber() int64 {
 	return 1
 }
 
diff --git a/client/resource_group/controller/util_test.go b/client/resource_group/controller/util_test.go
index a89ea08b955..10fa7c345a5 100644
--- a/client/resource_group/controller/util_test.go
+++ b/client/resource_group/controller/util_test.go
@@ -27,7 +27,6 @@ type example struct {
 }
 
 func TestDurationJSON(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	example := &example{}
 
@@ -41,7 +40,6 @@ func TestDurationJSON(t *testing.T) {
 }
 
 func TestDurationTOML(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	example := &example{}
 
diff --git a/client/retry/backoff_test.go b/client/retry/backoff_test.go
index c877860b5ae..8df06b75f94 100644
--- a/client/retry/backoff_test.go
+++ b/client/retry/backoff_test.go
@@ -95,7 +95,7 @@ func TestBackoffer(t *testing.T) {
 	// Test the retryable checker.
 	execCount = 0
 	bo = InitialBackoffer(base, max, total)
-	bo.SetRetryableChecker(func(err error) bool {
+	bo.SetRetryableChecker(func(error) bool {
 		return execCount < 2
 	})
 	err = bo.Exec(ctx, func() error {
@@ -169,7 +169,7 @@ func (w *testingWriter) Write(p []byte) (n int, err error) {
 	w.messages = append(w.messages, m)
 	return n, nil
 }
-func (w *testingWriter) Sync() error {
+func (*testingWriter) Sync() error {
 	return nil
 }
 
diff --git a/client/testutil/check_env_dummy.go b/client/testutil/check_env_dummy.go
index 2fbcbd1a9e7..c8f4d268c9d 100644
--- a/client/testutil/check_env_dummy.go
+++ b/client/testutil/check_env_dummy.go
@@ -16,6 +16,6 @@
 
 package testutil
 
-func environmentCheck(addr string) bool {
+func environmentCheck(_ string) bool {
 	return true
 }
diff --git a/client/tlsutil/tlsconfig.go b/client/tlsutil/tlsconfig.go
index c9cee5987bb..a8bac17f676 100644
--- a/client/tlsutil/tlsconfig.go
+++ b/client/tlsutil/tlsconfig.go
@@ -131,7 +131,7 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
 	}
 
 	if info.AllowedCN != "" {
-		cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
+		cfg.VerifyPeerCertificate = func(_ [][]byte, verifiedChains [][]*x509.Certificate) error {
 			for _, chains := range verifiedChains {
 				if len(chains) != 0 {
 					if info.AllowedCN == chains[0].Subject.CommonName {
@@ -145,10 +145,10 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
 
 	// this only reloads certs when there's a client request
 	// TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
-	cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+	cfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
 		return NewCert(info.CertFile, info.KeyFile, info.parseFunc)
 	}
-	cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+	cfg.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
 		return NewCert(info.CertFile, info.KeyFile, info.parseFunc)
 	}
 	return cfg, nil
diff --git a/client/tso_batch_controller.go b/client/tso_batch_controller.go
index 5f3b08c2895..d7ba5d7e74b 100644
--- a/client/tso_batch_controller.go
+++ b/client/tso_batch_controller.go
@@ -140,7 +140,7 @@ func (tbc *tsoBatchController) finishCollectedRequests(physical, firstLogical in
 	for i := 0; i < tbc.collectedRequestCount; i++ {
 		tsoReq := tbc.collectedRequests[i]
 		tsoReq.physical, tsoReq.logical = physical, tsoutil.AddLogical(firstLogical, int64(i), suffixBits)
-		defer trace.StartRegion(tsoReq.requestCtx, "pdclient.tsoReqDequeue").End()
+		defer trace.StartRegion(tsoReq.requestCtx, "pdclient.tsoReqDequeue").End() // nolint
 		tsoReq.tryDone(err)
 	}
 	// Prevent the finished requests from being processed again.
diff --git a/client/tso_dispatcher.go b/client/tso_dispatcher.go
index 88f8ffd61b5..ad3aa1c5d74 100644
--- a/client/tso_dispatcher.go
+++ b/client/tso_dispatcher.go
@@ -580,7 +580,7 @@ func (c *tsoClient) allowTSOFollowerProxy(dc string) bool {
 
 // chooseStream uses the reservoir sampling algorithm to randomly choose a connection.
 // connectionCtxs will only have only one stream to choose when the TSO Follower Proxy is off.
-func (c *tsoClient) chooseStream(connectionCtxs *sync.Map) (connectionCtx *tsoConnectionContext) { +func (*tsoClient) chooseStream(connectionCtxs *sync.Map) (connectionCtx *tsoConnectionContext) { idx := 0 connectionCtxs.Range(func(_, cc any) bool { j := rand.Intn(idx + 1) @@ -797,6 +797,7 @@ func (c *tsoClient) processRequests( stream tsoStream, dcLocation string, tbc *tsoBatchController, ) error { requests := tbc.getCollectedRequests() + // nolint for _, req := range requests { defer trace.StartRegion(req.requestCtx, "pdclient.tsoReqSend").End() if span := opentracing.SpanFromContext(req.requestCtx); span != nil && span.Tracer() != nil { diff --git a/client/tso_service_discovery.go b/client/tso_service_discovery.go index f6c46346d5d..34ef16f88b0 100644 --- a/client/tso_service_discovery.go +++ b/client/tso_service_discovery.go @@ -349,13 +349,11 @@ func (c *tsoServiceDiscovery) CheckMemberChanged() error { // AddServingURLSwitchedCallback adds callbacks which will be called when the primary in // a primary/secondary configured cluster is switched. -func (c *tsoServiceDiscovery) AddServingURLSwitchedCallback(callbacks ...func()) { -} +func (*tsoServiceDiscovery) AddServingURLSwitchedCallback(...func()) {} // AddServiceURLsSwitchedCallback adds callbacks which will be called when any primary/secondary // in a primary/secondary configured cluster is changed. -func (c *tsoServiceDiscovery) AddServiceURLsSwitchedCallback(callbacks ...func()) { -} +func (*tsoServiceDiscovery) AddServiceURLsSwitchedCallback(...func()) {} // SetTSOLocalServURLsUpdatedCallback adds a callback which will be called when the local tso // allocator leader list is updated. diff --git a/client/tso_stream.go b/client/tso_stream.go index 83c0f08d4e0..14b72bc697b 100644 --- a/client/tso_stream.go +++ b/client/tso_stream.go @@ -34,13 +34,13 @@ type tsoStreamBuilderFactory interface { type pdTSOStreamBuilderFactory struct{} -func (f *pdTSOStreamBuilderFactory) makeBuilder(cc *grpc.ClientConn) tsoStreamBuilder { +func (*pdTSOStreamBuilderFactory) makeBuilder(cc *grpc.ClientConn) tsoStreamBuilder { return &pdTSOStreamBuilder{client: pdpb.NewPDClient(cc), serverURL: cc.Target()} } type tsoTSOStreamBuilderFactory struct{} -func (f *tsoTSOStreamBuilderFactory) makeBuilder(cc *grpc.ClientConn) tsoStreamBuilder { +func (*tsoTSOStreamBuilderFactory) makeBuilder(cc *grpc.ClientConn) tsoStreamBuilder { return &tsoTSOStreamBuilder{client: tsopb.NewTSOClient(cc), serverURL: cc.Target()} } diff --git a/go.mod b/go.mod index 2620f5ad0a7..c76242f3753 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,6 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/joho/godotenv v1.4.0 github.com/mailru/easyjson v0.7.6 - github.com/mgechev/revive v1.0.2 github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d github.com/pingcap/errcode v0.3.0 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c @@ -95,7 +94,6 @@ require ( github.com/dnephin/pflag v1.0.7 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/fatih/color v1.10.0 // indirect - github.com/fatih/structtag v1.2.0 // indirect github.com/fogleman/gg v1.3.0 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect @@ -138,15 +136,11 @@ require ( github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a // indirect github.com/mattn/go-colorable v0.1.8 // indirect github.com/mattn/go-isatty v0.0.19 // indirect - github.com/mattn/go-runewidth v0.0.8 // indirect 
github.com/mattn/go-sqlite3 v1.14.15 // indirect - github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 // indirect github.com/minio/sio v0.3.0 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/oleiade/reflections v1.0.1 // indirect - github.com/olekukonko/tablewriter v0.0.4 // indirect github.com/onsi/gomega v1.20.1 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 // indirect diff --git a/go.sum b/go.sum index d99804c887c..d11fad07aa6 100644 --- a/go.sum +++ b/go.sum @@ -111,11 +111,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -141,7 +138,6 @@ github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -316,31 +312,19 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0= -github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg= -github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo= github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus= github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -355,8 +339,6 @@ github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= -github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -644,7 +626,6 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -712,7 +693,6 @@ golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191114200427-caa0b0f7d508/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -807,7 +787,6 @@ gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= moul.io/zapgorm2 v1.1.0 h1:qwAlMBYf+qJkJ7PAzJl4oCe6eS6QGiKAXUPeis0+RBE= moul.io/zapgorm2 v1.1.0/go.mod h1:emRfKjNqSzVj5lcgasBdovIXY1jSOwFz2GQZn1Rddks= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/pkg/audit/audit.go b/pkg/audit/audit.go index b971b09ed7e..f84d035f8c9 100644 --- a/pkg/audit/audit.go +++ b/pkg/audit/audit.go @@ -118,7 +118,7 @@ func NewLocalLogBackend(before bool) Backend { } // ProcessHTTPRequest is used to implement audit.Backend -func (l *LocalLogBackend) ProcessHTTPRequest(r *http.Request) bool { +func (*LocalLogBackend) ProcessHTTPRequest(r *http.Request) bool { requestInfo, ok := requestutil.RequestInfoFrom(r.Context()) if !ok { return false diff --git a/pkg/audit/audit_test.go b/pkg/audit/audit_test.go index 8098b36975e..9066d81ebe3 100644 --- a/pkg/audit/audit_test.go +++ b/pkg/audit/audit_test.go @@ -32,7 +32,6 @@ import ( ) func TestLabelMatcher(t *testing.T) { - t.Parallel() re := require.New(t) matcher := &LabelMatcher{"testSuccess"} labels1 := &BackendLabels{Labels: []string{"testFail", "testSuccess"}} @@ -42,7 +41,6 @@ func TestLabelMatcher(t *testing.T) { } func TestPrometheusHistogramBackend(t *testing.T) { - t.Parallel() re := require.New(t) serviceAuditHistogramTest := prometheus.NewHistogramVec( prometheus.HistogramOpts{ @@ -90,7 +88,6 @@ func TestPrometheusHistogramBackend(t *testing.T) { } func TestLocalLogBackendUsingFile(t *testing.T) { - t.Parallel() re := require.New(t) backend := NewLocalLogBackend(true) fname := testutil.InitTempFileLogger("info") diff --git a/pkg/autoscaling/calculation.go b/pkg/autoscaling/calculation.go index d85af498e47..8c8783dd618 100644 --- a/pkg/autoscaling/calculation.go +++ b/pkg/autoscaling/calculation.go @@ -409,7 +409,7 @@ func buildPlans(planMap map[string]map[string]struct{}, resourceTypeMap map[stri } // TODO: implement heterogeneous logic and take cluster information into consideration. 
-func findBestGroupToScaleIn(strategy *Strategy, scaleInQuota float64, groups []*Plan) Plan { +func findBestGroupToScaleIn(_ *Strategy, _ float64, groups []*Plan) Plan { return *groups[0] } diff --git a/pkg/autoscaling/calculation_test.go b/pkg/autoscaling/calculation_test.go index 05a348af59d..9eb4ad648df 100644 --- a/pkg/autoscaling/calculation_test.go +++ b/pkg/autoscaling/calculation_test.go @@ -29,7 +29,6 @@ import ( ) func TestGetScaledTiKVGroups(t *testing.T) { - t.Parallel() re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -204,7 +203,7 @@ func TestGetScaledTiKVGroups(t *testing.T) { type mockQuerier struct{} -func (q *mockQuerier) Query(options *QueryOptions) (QueryResult, error) { +func (*mockQuerier) Query(options *QueryOptions) (QueryResult, error) { result := make(QueryResult) for _, addr := range options.addresses { result[addr] = mockResultValue @@ -214,7 +213,6 @@ func (q *mockQuerier) Query(options *QueryOptions) (QueryResult, error) { } func TestGetTotalCPUUseTime(t *testing.T) { - t.Parallel() re := require.New(t) querier := &mockQuerier{} instances := []instance{ @@ -237,7 +235,6 @@ func TestGetTotalCPUUseTime(t *testing.T) { } func TestGetTotalCPUQuota(t *testing.T) { - t.Parallel() re := require.New(t) querier := &mockQuerier{} instances := []instance{ @@ -260,7 +257,6 @@ func TestGetTotalCPUQuota(t *testing.T) { } func TestScaleOutGroupLabel(t *testing.T) { - t.Parallel() re := require.New(t) var jsonStr = []byte(` { @@ -303,7 +299,6 @@ func TestScaleOutGroupLabel(t *testing.T) { } func TestStrategyChangeCount(t *testing.T) { - t.Parallel() re := require.New(t) var count uint64 = 2 strategy := &Strategy{ diff --git a/pkg/autoscaling/prometheus_test.go b/pkg/autoscaling/prometheus_test.go index b4cf9aefd91..9fe69e810d1 100644 --- a/pkg/autoscaling/prometheus_test.go +++ b/pkg/autoscaling/prometheus_test.go @@ -168,7 +168,7 @@ func makeJSONResponse(promResp *response) (*http.Response, []byte, error) { return response, body, nil } -func (c *normalClient) URL(ep string, args map[string]string) *url.URL { +func (*normalClient) URL(ep string, args map[string]string) *url.URL { return doURL(ep, args) } @@ -180,7 +180,6 @@ func (c *normalClient) Do(_ context.Context, req *http.Request) (response *http. } func TestRetrieveCPUMetrics(t *testing.T) { - t.Parallel() re := require.New(t) client := &normalClient{ mockData: make(map[string]*response), @@ -207,11 +206,11 @@ func TestRetrieveCPUMetrics(t *testing.T) { type emptyResponseClient struct{} -func (c *emptyResponseClient) URL(ep string, args map[string]string) *url.URL { +func (*emptyResponseClient) URL(ep string, args map[string]string) *url.URL { return doURL(ep, args) } -func (c *emptyResponseClient) Do(_ context.Context, req *http.Request) (r *http.Response, body []byte, err error) { +func (*emptyResponseClient) Do(context.Context, *http.Request) (r *http.Response, body []byte, err error) { promResp := &response{ Status: "success", Data: data{ @@ -225,7 +224,6 @@ func (c *emptyResponseClient) Do(_ context.Context, req *http.Request) (r *http. 
} func TestEmptyResponse(t *testing.T) { - t.Parallel() re := require.New(t) client := &emptyResponseClient{} querier := NewPrometheusQuerier(client) @@ -237,11 +235,11 @@ func TestEmptyResponse(t *testing.T) { type errorHTTPStatusClient struct{} -func (c *errorHTTPStatusClient) URL(ep string, args map[string]string) *url.URL { +func (*errorHTTPStatusClient) URL(ep string, args map[string]string) *url.URL { return doURL(ep, args) } -func (c *errorHTTPStatusClient) Do(_ context.Context, req *http.Request) (r *http.Response, body []byte, err error) { +func (*errorHTTPStatusClient) Do(context.Context, *http.Request) (r *http.Response, body []byte, err error) { promResp := &response{} r, body, err = makeJSONResponse(promResp) @@ -253,7 +251,6 @@ func (c *errorHTTPStatusClient) Do(_ context.Context, req *http.Request) (r *htt } func TestErrorHTTPStatus(t *testing.T) { - t.Parallel() re := require.New(t) client := &errorHTTPStatusClient{} querier := NewPrometheusQuerier(client) @@ -265,11 +262,11 @@ func TestErrorHTTPStatus(t *testing.T) { type errorPrometheusStatusClient struct{} -func (c *errorPrometheusStatusClient) URL(ep string, args map[string]string) *url.URL { +func (*errorPrometheusStatusClient) URL(ep string, args map[string]string) *url.URL { return doURL(ep, args) } -func (c *errorPrometheusStatusClient) Do(_ context.Context, req *http.Request) (r *http.Response, body []byte, err error) { +func (*errorPrometheusStatusClient) Do(_ context.Context, _ *http.Request) (r *http.Response, body []byte, err error) { promResp := &response{ Status: "error", } @@ -279,7 +276,6 @@ func (c *errorPrometheusStatusClient) Do(_ context.Context, req *http.Request) ( } func TestErrorPrometheusStatus(t *testing.T) { - t.Parallel() re := require.New(t) client := &errorPrometheusStatusClient{} querier := NewPrometheusQuerier(client) @@ -290,7 +286,6 @@ func TestErrorPrometheusStatus(t *testing.T) { } func TestGetInstanceNameFromAddress(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { address string @@ -328,7 +323,6 @@ func TestGetInstanceNameFromAddress(t *testing.T) { } func TestGetDurationExpression(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { duration time.Duration diff --git a/pkg/balancer/balancer_test.go b/pkg/balancer/balancer_test.go index 996b4f1da35..2c760c6220c 100644 --- a/pkg/balancer/balancer_test.go +++ b/pkg/balancer/balancer_test.go @@ -22,7 +22,6 @@ import ( ) func TestBalancerPutAndDelete(t *testing.T) { - t.Parallel() re := require.New(t) balancers := []Balancer[uint32]{ NewRoundRobin[uint32](), @@ -56,7 +55,6 @@ func TestBalancerPutAndDelete(t *testing.T) { } func TestBalancerDuplicate(t *testing.T) { - t.Parallel() re := require.New(t) balancers := []Balancer[uint32]{ NewRoundRobin[uint32](), @@ -77,7 +75,6 @@ func TestBalancerDuplicate(t *testing.T) { } func TestRoundRobin(t *testing.T) { - t.Parallel() re := require.New(t) balancer := NewRoundRobin[uint32]() for i := 0; i < 100; i++ { diff --git a/pkg/btree/btree_generic.go b/pkg/btree/btree_generic.go index f918a8ac686..599614678eb 100644 --- a/pkg/btree/btree_generic.go +++ b/pkg/btree/btree_generic.go @@ -73,7 +73,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//revive:disable +// nolint package btree import ( diff --git a/pkg/btree/btree_generic_test.go b/pkg/btree/btree_generic_test.go index 9aa118fb8ad..fd0df3e5aaf 100644 --- a/pkg/btree/btree_generic_test.go +++ b/pkg/btree/btree_generic_test.go @@ -475,7 +475,7 @@ func BenchmarkSeek(b *testing.B) { b.StartTimer() for i := 0; i < b.N; i++ { - tr.AscendGreaterOrEqual(Int(i%size), func(i Int) bool { return false }) + tr.AscendGreaterOrEqual(Int(i%size), func(_ Int) bool { return false }) } } diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index dbef41d2754..43e97dfa2b0 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -25,7 +25,6 @@ import ( ) func TestExpireRegionCache(t *testing.T) { - t.Parallel() re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -121,7 +120,6 @@ func sortIDs(ids []uint64) []uint64 { } func TestLRUCache(t *testing.T) { - t.Parallel() re := require.New(t) cache := newLRU(3) @@ -199,7 +197,6 @@ func TestLRUCache(t *testing.T) { } func TestFifoCache(t *testing.T) { - t.Parallel() re := require.New(t) cache := NewFIFO(3) cache.Put(1, "1") @@ -227,7 +224,6 @@ func TestFifoCache(t *testing.T) { } func TestFifoFromLastSameElems(t *testing.T) { - t.Parallel() re := require.New(t) type testStruct struct { value string @@ -260,7 +256,6 @@ func TestFifoFromLastSameElems(t *testing.T) { } func TestTwoQueueCache(t *testing.T) { - t.Parallel() re := require.New(t) cache := newTwoQueue(3) cache.Put(1, "1") @@ -345,7 +340,6 @@ func (pq PriorityQueueItemTest) ID() uint64 { } func TestPriorityQueue(t *testing.T) { - t.Parallel() re := require.New(t) testData := []PriorityQueueItemTest{0, 1, 2, 3, 4, 5} pq := NewPriorityQueue(0) diff --git a/pkg/cgroup/cgroup.go b/pkg/cgroup/cgroup.go index e45dcbc0929..133bd3158c8 100644 --- a/pkg/cgroup/cgroup.go +++ b/pkg/cgroup/cgroup.go @@ -143,7 +143,6 @@ func combineErrors(err1, err2 error) error { func readFile(filepath string) (res []byte, err error) { var f *os.File - //nolint:gosec f, err = os.Open(filepath) if err != nil { return nil, err @@ -185,7 +184,6 @@ func controllerMatch(field string, controller string) bool { // The controller is defined via either type `memory` for cgroup v1 or via empty type for cgroup v2, // where the type is the second field in /proc/[pid]/cgroup file func detectControlPath(cgroupFilePath string, controller string) (string, error) { - //nolint:gosec cgroup, err := os.Open(cgroupFilePath) if err != nil { return "", errors.Wrapf(err, "failed to read %s cgroup from cgroups file: %s", controller, cgroupFilePath) @@ -229,7 +227,6 @@ func detectControlPath(cgroupFilePath string, controller string) (string, error) // See http://man7.org/linux/man-pages/man5/proc.5.html for `mountinfo` format. 
func getCgroupDetails(mountInfoPath string, cRoot string, controller string) (mount []string, version []int, err error) {
-	//nolint:gosec
 	info, err := os.Open(mountInfoPath)
 	if err != nil {
 		return nil, nil, errors.Wrapf(err, "failed to read mounts info from file: %s", mountInfoPath)
@@ -411,7 +408,6 @@ func detectCPUQuotaInV2(cRoot string) (period, quota int64, err error) {
 func detectCPUUsageInV2(cRoot string) (stime, utime uint64, err error) {
 	statFilePath := filepath.Join(cRoot, cgroupV2CPUStat)
 	var stat *os.File
-	//nolint:gosec
 	stat, err = os.Open(statFilePath)
 	if err != nil {
 		return 0, 0, errors.Wrapf(err, "can't read cpu usage from cgroup v2 at %s", statFilePath)
@@ -444,7 +440,6 @@ func detectCPUUsageInV2(cRoot string) (stime, utime uint64, err error) {
 func readInt64Value(root, filename string, cgVersion int) (value uint64, err error) {
 	filePath := filepath.Join(root, filename)
-	//nolint:gosec
 	file, err := os.Open(filePath)
 	if err != nil {
 		return 0, errors.Wrapf(err, "can't read %s from cgroup v%d", filename, cgVersion)
diff --git a/pkg/cgroup/cgroup_memory.go b/pkg/cgroup/cgroup_memory.go
index fb8e8f212dc..2a6d581023e 100644
--- a/pkg/cgroup/cgroup_memory.go
+++ b/pkg/cgroup/cgroup_memory.go
@@ -177,7 +177,6 @@ func detectMemInactiveFileUsageInV2(root string) (uint64, error) {
 func detectMemStatValue(cRoot, filename, key string, cgVersion int) (value uint64, err error) {
 	statFilePath := filepath.Join(cRoot, filename)
-	//nolint:gosec
 	stat, err := os.Open(statFilePath)
 	if err != nil {
 		return 0, errors.Wrapf(err, "can't read file %s from cgroup v%d", filename, cgVersion)
diff --git a/pkg/codec/codec_test.go b/pkg/codec/codec_test.go
index f734d2e528e..50bf552a60d 100644
--- a/pkg/codec/codec_test.go
+++ b/pkg/codec/codec_test.go
@@ -21,7 +21,6 @@ import (
 )
 
 func TestDecodeBytes(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	key := "abcdefghijklmnopqrstuvwxyz"
 	for i := 0; i < len(key); i++ {
@@ -32,7 +31,6 @@ func TestDecodeBytes(t *testing.T) {
 }
 
 func TestTableID(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	key := EncodeBytes([]byte("t\x80\x00\x00\x00\x00\x00\x00\xff"))
 	re.Equal(int64(0xff), key.TableID())
diff --git a/pkg/core/metrics.go b/pkg/core/metrics.go
index e6f3535b1d7..d23cf9dfcaa 100644
--- a/pkg/core/metrics.go
+++ b/pkg/core/metrics.go
@@ -123,19 +123,19 @@ func NewNoopHeartbeatProcessTracer() RegionHeartbeatProcessTracer {
 	return &noopHeartbeatProcessTracer{}
 }
 
-func (n *noopHeartbeatProcessTracer) Begin() {}
-func (n *noopHeartbeatProcessTracer) OnPreCheckFinished() {}
-func (n *noopHeartbeatProcessTracer) OnAsyncHotStatsFinished() {}
-func (n *noopHeartbeatProcessTracer) OnRegionGuideFinished() {}
-func (n *noopHeartbeatProcessTracer) OnSaveCacheBegin() {}
-func (n *noopHeartbeatProcessTracer) OnSaveCacheFinished() {}
-func (n *noopHeartbeatProcessTracer) OnCheckOverlapsFinished() {}
-func (n *noopHeartbeatProcessTracer) OnValidateRegionFinished() {}
-func (n *noopHeartbeatProcessTracer) OnSetRegionFinished() {}
-func (n *noopHeartbeatProcessTracer) OnUpdateSubTreeFinished() {}
-func (n *noopHeartbeatProcessTracer) OnCollectRegionStatsFinished() {}
-func (n *noopHeartbeatProcessTracer) OnAllStageFinished() {}
-func (n *noopHeartbeatProcessTracer) LogFields() []zap.Field {
+func (*noopHeartbeatProcessTracer) Begin() {}
+func (*noopHeartbeatProcessTracer) OnPreCheckFinished() {}
+func (*noopHeartbeatProcessTracer) OnAsyncHotStatsFinished() {}
+func (*noopHeartbeatProcessTracer) OnRegionGuideFinished() {}
+func (*noopHeartbeatProcessTracer) OnSaveCacheBegin() {}
+func (*noopHeartbeatProcessTracer) OnSaveCacheFinished() {}
+func (*noopHeartbeatProcessTracer) OnCheckOverlapsFinished() {}
+func (*noopHeartbeatProcessTracer) OnValidateRegionFinished() {}
+func (*noopHeartbeatProcessTracer) OnSetRegionFinished() {}
+func (*noopHeartbeatProcessTracer) OnUpdateSubTreeFinished() {}
+func (*noopHeartbeatProcessTracer) OnCollectRegionStatsFinished() {}
+func (*noopHeartbeatProcessTracer) OnAllStageFinished() {}
+func (*noopHeartbeatProcessTracer) LogFields() []zap.Field {
 	return nil
 }
diff --git a/pkg/core/rangetree/range_tree_test.go b/pkg/core/rangetree/range_tree_test.go
index 0664a7bdbef..6955947cb1b 100644
--- a/pkg/core/rangetree/range_tree_test.go
+++ b/pkg/core/rangetree/range_tree_test.go
@@ -85,7 +85,6 @@ func bucketDebrisFactory(startKey, endKey []byte, item RangeItem) []RangeItem {
 }
 
 func TestRingPutItem(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	bucketTree := NewRangeTree(2, bucketDebrisFactory)
 	bucketTree.Update(newSimpleBucketItem([]byte("002"), []byte("100")))
@@ -120,7+119,6 @@ func TestRingPutItem(t *testing.T) {
 }
 
 func TestDebris(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	ringItem := newSimpleBucketItem([]byte("010"), []byte("090"))
 	var overlaps []RangeItem
diff --git a/pkg/core/region.go b/pkg/core/region.go
index f7a4ef5f0fd..baabafa1fa9 100644
--- a/pkg/core/region.go
+++ b/pkg/core/region.go
@@ -716,7 +716,7 @@ type RegionGuideFunc func(region, origin *RegionInfo) (saveKV, saveCache, needSy
 // GenerateRegionGuideFunc is used to generate a RegionGuideFunc. Control the log output by specifying the log function.
 // nil means do not print the log.
 func GenerateRegionGuideFunc(enableLog bool) RegionGuideFunc {
-	noLog := func(msg string, fields ...zap.Field) {}
+	noLog := func(string, ...zap.Field) {}
 	debug, info := noLog, noLog
 	if enableLog {
 		debug = log.Debug
@@ -964,7 +964,7 @@ func (r *RegionsInfo) AtomicCheckAndPutRegion(region *RegionInfo, trace RegionHe
 }
 
 // GetRelevantRegions returns the relevant regions for a given region.
-func (r *RegionsInfo) GetRelevantRegions(region *RegionInfo, trace RegionHeartbeatProcessTracer) (origin *RegionInfo, overlaps []*regionItem) {
+func (r *RegionsInfo) GetRelevantRegions(region *RegionInfo, _ RegionHeartbeatProcessTracer) (origin *RegionInfo, overlaps []*regionItem) {
 	r.t.RLock()
 	defer r.t.RUnlock()
 	origin = r.getRegionLocked(region.GetID())
diff --git a/pkg/core/store_test.go b/pkg/core/store_test.go
index 67618a63ea9..5cb324e5635 100644
--- a/pkg/core/store_test.go
+++ b/pkg/core/store_test.go
@@ -62,7 +62,7 @@ func TestDistinctScore(t *testing.T) {
 	re.Equal(float64(0), DistinctScore(labels, stores, store))
 }
 
-func TestCloneStore(t *testing.T) {
+func TestCloneStore(_ *testing.T) {
 	meta := &metapb.Store{Id: 1, Address: "mock://tikv-1", Labels: []*metapb.StoreLabel{{Key: "zone", Value: "z1"}, {Key: "host", Value: "h1"}}}
 	store := NewStoreInfo(meta)
 	start := time.Now()
diff --git a/pkg/core/storelimit/limit_test.go b/pkg/core/storelimit/limit_test.go
index 75865330311..e11618767a1 100644
--- a/pkg/core/storelimit/limit_test.go
+++ b/pkg/core/storelimit/limit_test.go
@@ -45,7 +45,6 @@ func TestStoreLimit(t *testing.T) {
 }
 
 func TestSlidingWindow(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	capacity := int64(defaultWindowSize)
 	s := NewSlidingWindows()
@@ -92,7 +91,6 @@ func TestSlidingWindow(t *testing.T) {
 }
 
 func TestWindow(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	capacity := int64(100 * 10)
 	s := newWindow(capacity)
diff --git a/pkg/core/storelimit/sliding_window.go b/pkg/core/storelimit/sliding_window.go
index 0a70eb548d0..8feb0a2094d 100644
--- a/pkg/core/storelimit/sliding_window.go
+++ b/pkg/core/storelimit/sliding_window.go
@@ -50,7 +50,7 @@ func NewSlidingWindows() *SlidingWindows {
 }
 
 // Version returns v2
-func (s *SlidingWindows) Version() string {
+func (*SlidingWindows) Version() string {
 	return VersionV2
 }
 
@@ -75,8 +75,7 @@ func (s *SlidingWindows) Feedback(e float64) {
 }
 
 // Reset does nothing because the capacity depends on the feedback.
-func (s *SlidingWindows) Reset(_ float64, _ Type) {
-}
+func (*SlidingWindows) Reset(_ float64, _ Type) {}
 
 func (s *SlidingWindows) set(cap float64, typ Type) {
 	if typ != SendSnapshot {
diff --git a/pkg/core/storelimit/store_limit.go b/pkg/core/storelimit/store_limit.go
index dc1de88e09f..8d70b2918a1 100644
--- a/pkg/core/storelimit/store_limit.go
+++ b/pkg/core/storelimit/store_limit.go
@@ -82,15 +82,15 @@ func NewStoreRateLimit(ratePerSec float64) StoreLimit {
 }
 
 // Ack does nothing.
-func (l *StoreRateLimit) Ack(_ int64, _ Type) {}
+func (*StoreRateLimit) Ack(_ int64, _ Type) {}
 
 // Version returns v1
-func (l *StoreRateLimit) Version() string {
+func (*StoreRateLimit) Version() string {
 	return VersionV1
 }
 
 // Feedback does nothing.
-func (l *StoreRateLimit) Feedback(_ float64) {}
+func (*StoreRateLimit) Feedback(_ float64) {}
 
 // Available returns the number of available tokens.
 // notice that the priority level is not used.
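The storelimit changes above are the canonical shape of revive's `unused-receiver` and `unused-parameter` fixes: when a method body never reads its receiver or an argument, the identifier is dropped entirely or blanked with `_`. A minimal sketch of the rule's before/after, with hypothetical names (not PD code):

```go
package main

import "fmt"

type limiter interface {
	Version() string
}

type slidingWindows struct{}

// The receiver is never used, so revive prefers dropping its name:
// `func (s *slidingWindows) Version()` becomes `func (*slidingWindows) Version()`.
func (*slidingWindows) Version() string { return "v2" }

// An ignored parameter keeps only its type, or is blanked with `_`.
func report(l limiter, _ string) {
	fmt.Println("limiter version:", l.Version())
}

func main() {
	report(&slidingWindows{}, "ignored label")
}
```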
diff --git a/pkg/dashboard/adapter/redirector_test.go b/pkg/dashboard/adapter/redirector_test.go
index fff052f1d50..7767a6fda34 100644
--- a/pkg/dashboard/adapter/redirector_test.go
+++ b/pkg/dashboard/adapter/redirector_test.go
@@ -42,14 +42,14 @@ func TestRedirectorTestSuite(t *testing.T) {
 
 func (suite *redirectorTestSuite) SetupSuite() {
 	suite.tempText = "temp1"
-	suite.tempServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	suite.tempServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		_, _ = io.WriteString(w, suite.tempText)
 	}))
 
 	suite.testName = "test1"
 	suite.redirector = NewRedirector(suite.testName, nil)
 	suite.noRedirectHTTPClient = &http.Client{
-		CheckRedirect: func(req *http.Request, via []*http.Request) error {
+		CheckRedirect: func(*http.Request, []*http.Request) error {
 			// ErrUseLastResponse can be returned by Client.CheckRedirect hooks to
 			// control how redirects are processed. If returned, the next request
 			// is not sent and the most recent response is returned with its body
diff --git a/pkg/dashboard/dashboard.go b/pkg/dashboard/dashboard.go
index 9cd61a6f332..998127d0f1b 100644
--- a/pkg/dashboard/dashboard.go
+++ b/pkg/dashboard/dashboard.go
@@ -69,7 +69,7 @@ func GetServiceBuilders() []server.HandlerBuilder {
 	// The order of execution must be sequential.
 	return []server.HandlerBuilder{
 		// Dashboard API Service
-		func(ctx context.Context, srv *server.Server) (http.Handler, apiutil.APIServiceGroup, error) {
+		func(_ context.Context, srv *server.Server) (http.Handler, apiutil.APIServiceGroup, error) {
 			distroutil.MustLoadAndReplaceStrings()
 			if cfg, err = adapter.GenDashboardConfig(srv); err != nil {
diff --git a/pkg/election/leadership_test.go b/pkg/election/leadership_test.go
index de2e4b1129b..1fde4ddeba7 100644
--- a/pkg/election/leadership_test.go
+++ b/pkg/election/leadership_test.go
@@ -117,35 +117,35 @@ func TestExitWatch(t *testing.T) {
 	re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/election/fastTick", "return(true)"))
 	re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/utils/etcdutil/fastTick", "return(true)"))
 	// Case1: close the client before the watch loop starts
-	checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() {
+	checkExitWatch(t, leaderKey, func(_ *embed.Etcd, client *clientv3.Client) func() {
 		re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayWatcher", `pause`))
 		client.Close()
 		re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayWatcher"))
 		return func() {}
 	})
 	// Case2: close the client when the watch loop is running
-	checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() {
+	checkExitWatch(t, leaderKey, func(_ *embed.Etcd, client *clientv3.Client) func() {
 		// Wait for the watch loop to start
 		time.Sleep(500 * time.Millisecond)
 		client.Close()
 		return func() {}
 	})
 	// Case3: delete the leader key
-	checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() {
+	checkExitWatch(t, leaderKey, func(_ *embed.Etcd, client *clientv3.Client) func() {
 		leaderKey := leaderKey
 		_, err := client.Delete(context.Background(), leaderKey)
 		re.NoError(err)
 		return func() {}
 	})
 	// Case4: close the server before the watch loop starts
-	checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() {
+	checkExitWatch(t, leaderKey, func(server *embed.Etcd, _ *clientv3.Client) func() {
 		re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayWatcher", `pause`))
 		server.Close()
 		re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayWatcher"))
 		return func() {}
 	})
 	// Case5: close the server when the watch loop is running
-	checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() {
+	checkExitWatch(t, leaderKey, func(server *embed.Etcd, _ *clientv3.Client) func() {
 		// Wait for the watch loop to start
 		time.Sleep(500 * time.Millisecond)
 		server.Close()
diff --git a/pkg/encryption/config_test.go b/pkg/encryption/config_test.go
index 6f7e4a41b03..4134d46c2f3 100644
--- a/pkg/encryption/config_test.go
+++ b/pkg/encryption/config_test.go
@@ -23,7 +23,6 @@ import (
 )
 
 func TestAdjustDefaultValue(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	config := &Config{}
 	err := config.Adjust()
@@ -35,21 +34,18 @@ func TestAdjustDefaultValue(t *testing.T) {
 }
 
 func TestAdjustInvalidDataEncryptionMethod(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	config := &Config{DataEncryptionMethod: "unknown"}
 	re.Error(config.Adjust())
 }
 
 func TestAdjustNegativeRotationDuration(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	config := &Config{DataKeyRotationPeriod: typeutil.NewDuration(time.Duration(int64(-1)))}
 	re.Error(config.Adjust())
 }
 
 func TestAdjustInvalidMasterKeyType(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	config := &Config{MasterKey: MasterKeyConfig{Type: "unknown"}}
 	re.Error(config.Adjust())
diff --git a/pkg/encryption/crypter_test.go b/pkg/encryption/crypter_test.go
index 12a851d1563..9ac72bd7813 100644
--- a/pkg/encryption/crypter_test.go
+++ b/pkg/encryption/crypter_test.go
@@ -24,7 +24,6 @@ import (
 )
 
 func TestEncryptionMethodSupported(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	re.Error(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_PLAINTEXT))
 	re.Error(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_UNKNOWN))
@@ -34,7 +33,6 @@ func TestEncryptionMethodSupported(t *testing.T) {
 }
 
 func TestKeyLength(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	_, err := KeyLength(encryptionpb.EncryptionMethod_PLAINTEXT)
 	re.Error(err)
@@ -52,7 +50,6 @@ func TestKeyLength(t *testing.T) {
 }
 
 func TestNewIv(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	ivCtr, err := NewIvCTR()
 	re.NoError(err)
@@ -63,7 +60,6 @@ func TestNewIv(t *testing.T) {
 }
 
 func TestNewDataKey(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	for _, method := range []encryptionpb.EncryptionMethod{
 		encryptionpb.EncryptionMethod_AES128_CTR,
@@ -82,7 +78,6 @@ func TestNewDataKey(t *testing.T) {
 }
 
 func TestAesGcmCrypter(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	key, err := hex.DecodeString("ed568fbd8c8018ed2d042a4e5d38d6341486922d401d2022fb81e47c900d3f07")
 	re.NoError(err)
diff --git a/pkg/encryption/key_manager_test.go b/pkg/encryption/key_manager_test.go
index 74f8b9a3b47..26453eeb5b3 100644
--- a/pkg/encryption/key_manager_test.go
+++ b/pkg/encryption/key_manager_test.go
@@ -774,7 +774,7 @@ func TestSetLeadershipMasterKeyWithCiphertextKey(t *testing.T) {
 	outputMasterKey, _ := hex.DecodeString(testMasterKey)
 	outputCiphertextKey, _ := hex.DecodeString(testCiphertextKey)
 	helper.newMasterKey = func(
-		meta *encryptionpb.MasterKey,
+		_ *encryptionpb.MasterKey,
 		ciphertext []byte,
 	) (*MasterKey, error) {
 		if newMasterKeyCalled < 2 {
@@ -905,7 +905,7 @@ func TestKeyRotation(t *testing.T) {
 	mockNow := int64(1601679533)
 	helper.now = func() time.Time { return time.Unix(atomic.LoadInt64(&mockNow), 0) }
 	mockTick := make(chan time.Time)
-	helper.tick = func(ticker *time.Ticker) <-chan time.Time { return mockTick }
+	helper.tick = func(_ *time.Ticker) <-chan time.Time { return mockTick }
 	// Listen on watcher event
 	reloadEvent := make(chan struct{}, 10)
 	helper.eventAfterReloadByWatcher = func() {
@@ -1001,7 +1001,7 @@ func TestKeyRotationConflict(t *testing.T) {
 	mockNow := int64(1601679533)
 	helper.now = func() time.Time { return time.Unix(atomic.LoadInt64(&mockNow), 0) }
 	mockTick := make(chan time.Time, 10)
-	helper.tick = func(ticker *time.Ticker) <-chan time.Time { return mockTick }
+	helper.tick = func(_ *time.Ticker) <-chan time.Time { return mockTick }
 	// Listen on ticker event
 	tickerEvent := make(chan struct{}, 10)
 	helper.eventAfterTicker = func() {
diff --git a/pkg/encryption/kms.go b/pkg/encryption/kms.go
index 7c52b4280c2..99dcf9619a3 100644
--- a/pkg/encryption/kms.go
+++ b/pkg/encryption/kms.go
@@ -60,7 +60,7 @@ func newMasterKeyFromKMS(
 	roleArn := os.Getenv(envAwsRoleArn)
 	tokenFile := os.Getenv(envAwsWebIdentityTokenFile)
 	sessionName := os.Getenv(envAwsRoleSessionName)
-	optFn := func(options *kms.Options) {}
+	optFn := func(*kms.Options) {}
 	// Session name is optional.
 	if roleArn != "" && tokenFile != "" {
 		client := sts.NewFromConfig(cfg)
diff --git a/pkg/encryption/master_key_test.go b/pkg/encryption/master_key_test.go
index 4bc08dab7a5..31962e9e99d 100644
--- a/pkg/encryption/master_key_test.go
+++ b/pkg/encryption/master_key_test.go
@@ -24,7 +24,6 @@ import (
 )
 
 func TestPlaintextMasterKey(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	config := &encryptionpb.MasterKey{
 		Backend: &encryptionpb.MasterKey_Plaintext{
@@ -50,7 +49,6 @@ func TestPlaintextMasterKey(t *testing.T) {
 }
 
 func TestEncrypt(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	keyHex := "2f07ec61e5a50284f47f2b402a962ec672e500b26cb3aa568bb1531300c74806" // #nosec G101
 	key, err := hex.DecodeString(keyHex)
@@ -66,7 +64,6 @@ func TestEncrypt(t *testing.T) {
 }
 
 func TestDecrypt(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	keyHex := "2f07ec61e5a50284f47f2b402a962ec672e500b26cb3aa568bb1531300c74806" // #nosec G101
 	key, err := hex.DecodeString(keyHex)
@@ -83,7 +80,6 @@ func TestDecrypt(t *testing.T) {
 }
 
 func TestNewFileMasterKeyMissingPath(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	config := &encryptionpb.MasterKey{
 		Backend: &encryptionpb.MasterKey_File{
@@ -97,7 +93,6 @@ func TestNewFileMasterKeyMissingPath(t *testing.T) {
 }
 
 func TestNewFileMasterKeyMissingFile(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	dir := t.TempDir()
 	path := dir + "/key"
@@ -113,7 +108,6 @@ func TestNewFileMasterKeyMissingFile(t *testing.T) {
 }
 
 func TestNewFileMasterKeyNotHexString(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	dir := t.TempDir()
 	path := dir + "/key"
@@ -130,7 +124,6 @@ func TestNewFileMasterKeyNotHexString(t *testing.T) {
 }
 
 func TestNewFileMasterKeyLengthMismatch(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	dir := t.TempDir()
 	path := dir + "/key"
@@ -147,7 +140,6 @@ func TestNewFileMasterKeyLengthMismatch(t *testing.T) {
 }
 
 func TestNewFileMasterKey(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	key := "2f07ec61e5a50284f47f2b402a962ec672e500b26cb3aa568bb1531300c74806" // #nosec G101
 	dir := t.TempDir()
diff --git a/pkg/encryption/region_crypter_test.go b/pkg/encryption/region_crypter_test.go
index 5fd9778a8c0..b1ca558063c 100644
--- a/pkg/encryption/region_crypter_test.go
+++ b/pkg/encryption/region_crypter_test.go
@@ -70,7 +70,6 @@ func (m *testKeyManager) GetKey(keyID uint64) (*encryptionpb.DataKey, error) {
 }
 
 func TestNilRegion(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	m := newTestKeyManager()
 	region, err := EncryptRegion(nil, m)
@@ -81,7 +80,6 @@ func TestNilRegion(t *testing.T) {
 }
 
 func TestEncryptRegionWithoutKeyManager(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	region := &metapb.Region{
 		Id: 10,
@@ -98,7 +96,6 @@ func TestEncryptRegionWithoutKeyManager(t *testing.T) {
 }
 
 func TestEncryptRegionWhileEncryptionDisabled(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	region := &metapb.Region{
 		Id: 10,
@@ -117,7 +114,6 @@ func TestEncryptRegionWhileEncryptionDisabled(t *testing.T) {
 }
 
 func TestEncryptRegion(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	startKey := []byte("abc")
 	endKey := []byte("xyz")
@@ -152,7 +148,6 @@ func TestEncryptRegion(t *testing.T) {
 }
 
 func TestDecryptRegionNotEncrypted(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	region := &metapb.Region{
 		Id: 10,
@@ -170,7 +165,6 @@ func TestDecryptRegionNotEncrypted(t *testing.T) {
 }
 
 func TestDecryptRegionWithoutKeyManager(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	region := &metapb.Region{
 		Id: 10,
@@ -186,7 +180,6 @@ func TestDecryptRegionWithoutKeyManager(t *testing.T) {
 }
 
 func TestDecryptRegionWhileKeyMissing(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	keyID := uint64(3)
 	m := newTestKeyManager()
@@ -207,7 +200,6 @@ func TestDecryptRegionWhileKeyMissing(t *testing.T) {
 }
 
 func TestDecryptRegion(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	keyID := uint64(1)
 	startKey := []byte("abc")
diff --git a/pkg/errs/errs_test.go b/pkg/errs/errs_test.go
index d76c02dc110..01b7de461b8 100644
--- a/pkg/errs/errs_test.go
+++ b/pkg/errs/errs_test.go
@@ -43,7 +43,7 @@ func (w *testingWriter) Write(p []byte) (n int, err error) {
 	return n, nil
 }
 
-func (w *testingWriter) Sync() error {
+func (*testingWriter) Sync() error {
 	return nil
 }
 
@@ -97,7 +97,6 @@ func TestError(t *testing.T) {
 }
 
 func TestErrorEqual(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	err1 := ErrSchedulerNotFound.FastGenByArgs()
 	err2 := ErrSchedulerNotFound.FastGenByArgs()
@@ -125,7 +124,7 @@ func TestErrorEqual(t *testing.T) {
 	re.False(errors.ErrorEqual(err1, err2))
 }
 
-func TestZapError(t *testing.T) {
+func TestZapError(_ *testing.T) {
 	err := errors.New("test")
 	log.Info("test", ZapError(err))
 	err1 := ErrSchedulerNotFound
@@ -134,7 +133,6 @@ func TestZapError(t *testing.T) {
 }
 
 func TestErrorWithStack(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	conf := &log.Config{Level: "debug", File: log.FileLogConfig{}, DisableTimestamp: true}
 	lg := newZapTestLogger(conf)
diff --git a/pkg/mcs/metastorage/server/grpc_service.go b/pkg/mcs/metastorage/server/grpc_service.go
index f5de50765e8..f018dc72f9f 100644
--- a/pkg/mcs/metastorage/server/grpc_service.go
+++ b/pkg/mcs/metastorage/server/grpc_service.go
@@ -39,13 +39,13 @@ var (
 var _ meta_storagepb.MetaStorageServer = (*Service)(nil)
 
 // SetUpRestHandler is a hook to sets up the REST service.
-var SetUpRestHandler = func(srv *Service) (http.Handler, apiutil.APIServiceGroup) {
+var SetUpRestHandler = func(*Service) (http.Handler, apiutil.APIServiceGroup) {
 	return dummyRestService{}, apiutil.APIServiceGroup{}
 }
 
 type dummyRestService struct{}
 
-func (d dummyRestService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (dummyRestService) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
 	w.WriteHeader(http.StatusNotImplemented)
 	w.Write([]byte("not implemented"))
 }
diff --git a/pkg/mcs/resourcemanager/server/config.go b/pkg/mcs/resourcemanager/server/config.go
index 508f37fe069..dfb2babe676 100644
--- a/pkg/mcs/resourcemanager/server/config.go
+++ b/pkg/mcs/resourcemanager/server/config.go
@@ -112,10 +112,13 @@ func (rmc *ControllerConfig) Adjust(meta *configutil.ConfigMetaData) {
 	if rmc == nil {
 		return
 	}
-	rmc.RequestUnit.Adjust()
-
-	configutil.AdjustDuration(&rmc.DegradedModeWaitDuration, defaultDegradedModeWaitDuration)
-	configutil.AdjustDuration(&rmc.LTBMaxWaitDuration, defaultMaxWaitDuration)
+	rmc.RequestUnit.Adjust(meta.Child("request-unit"))
+	if !meta.IsDefined("degraded-mode-wait-duration") {
+		configutil.AdjustDuration(&rmc.DegradedModeWaitDuration, defaultDegradedModeWaitDuration)
+	}
+	if !meta.IsDefined("ltb-max-wait-duration") {
+		configutil.AdjustDuration(&rmc.LTBMaxWaitDuration, defaultMaxWaitDuration)
+	}
 	failpoint.Inject("enableDegradedMode", func() {
 		configutil.AdjustDuration(&rmc.DegradedModeWaitDuration, time.Second)
 	})
@@ -144,30 +147,30 @@ type RequestUnitConfig struct {
 }
 
 // Adjust adjusts the configuration and initializes it with the default value if necessary.
-func (ruc *RequestUnitConfig) Adjust() {
+func (ruc *RequestUnitConfig) Adjust(meta *configutil.ConfigMetaData) {
 	if ruc == nil {
 		return
 	}
-	if ruc.ReadBaseCost == 0 {
-		ruc.ReadBaseCost = defaultReadBaseCost
+	if !meta.IsDefined("read-base-cost") {
+		configutil.AdjustFloat64(&ruc.ReadBaseCost, defaultReadBaseCost)
 	}
-	if ruc.ReadPerBatchBaseCost == 0 {
-		ruc.ReadPerBatchBaseCost = defaultReadPerBatchBaseCost
+	if !meta.IsDefined("read-per-batch-base-cost") {
+		configutil.AdjustFloat64(&ruc.ReadPerBatchBaseCost, defaultReadPerBatchBaseCost)
 	}
-	if ruc.ReadCostPerByte == 0 {
-		ruc.ReadCostPerByte = defaultReadCostPerByte
+	if !meta.IsDefined("read-cost-per-byte") {
+		configutil.AdjustFloat64(&ruc.ReadCostPerByte, defaultReadCostPerByte)
 	}
-	if ruc.WriteBaseCost == 0 {
-		ruc.WriteBaseCost = defaultWriteBaseCost
+	if !meta.IsDefined("write-base-cost") {
+		configutil.AdjustFloat64(&ruc.WriteBaseCost, defaultWriteBaseCost)
 	}
-	if ruc.WritePerBatchBaseCost == 0 {
-		ruc.WritePerBatchBaseCost = defaultWritePerBatchBaseCost
+	if !meta.IsDefined("write-per-batch-base-cost") {
+		configutil.AdjustFloat64(&ruc.WritePerBatchBaseCost, defaultWritePerBatchBaseCost)
 	}
-	if ruc.WriteCostPerByte == 0 {
-		ruc.WriteCostPerByte = defaultWriteCostPerByte
+	if !meta.IsDefined("write-cost-per-byte") {
+		configutil.AdjustFloat64(&ruc.WriteCostPerByte, defaultWriteCostPerByte)
 	}
-	if ruc.CPUMsCost == 0 {
-		ruc.CPUMsCost = defaultCPUMsCost
+	if !meta.IsDefined("read-cpu-ms-cost") {
+		configutil.AdjustFloat64(&ruc.CPUMsCost, defaultCPUMsCost)
 	}
 }
 
@@ -201,11 +204,11 @@ func (c *Config) Parse(flagSet *pflag.FlagSet) error {
 	configutil.AdjustCommandLineString(flagSet, &c.ListenAddr, "listen-addr")
 	configutil.AdjustCommandLineString(flagSet, &c.AdvertiseListenAddr, "advertise-listen-addr")
 
-	return c.Adjust(meta, false)
+	return c.Adjust(meta)
 }
 
 // Adjust is used to adjust the resource manager configurations.
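The `RequestUnitConfig.Adjust` rewrite just above replaces zero-value checks with TOML metadata lookups: `ruc.ReadBaseCost == 0` cannot tell "the key was omitted" apart from "the user explicitly set 0", while `meta.IsDefined(...)` can. A standalone sketch of the same idea using `github.com/BurntSushi/toml` directly (the field and default value here are illustrative, not PD's):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	ReadBaseCost float64 `toml:"read-base-cost"`
}

func main() {
	var cfg config
	// The user explicitly asks for a zero cost; a `== 0` default check
	// would silently overwrite it, a metadata check does not.
	meta, err := toml.Decode(`read-base-cost = 0.0`, &cfg)
	if err != nil {
		panic(err)
	}
	if !meta.IsDefined("read-base-cost") {
		cfg.ReadBaseCost = 0.125 // apply the default only when the key is absent
	}
	fmt.Println(cfg.ReadBaseCost) // prints 0: the explicit value survives
}
```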
-func (c *Config) Adjust(meta *toml.MetaData, reloading bool) error {
+func (c *Config) Adjust(meta *toml.MetaData) error {
 	configMetaData := configutil.NewConfigMetadata(meta)
 	if err := configMetaData.CheckUndecoded(); err != nil {
 		c.WarningMsgs = append(c.WarningMsgs, err.Error())
diff --git a/pkg/mcs/resourcemanager/server/config_test.go b/pkg/mcs/resourcemanager/server/config_test.go
index 64fd133ea73..2d57100468e 100644
--- a/pkg/mcs/resourcemanager/server/config_test.go
+++ b/pkg/mcs/resourcemanager/server/config_test.go
@@ -39,7 +39,7 @@ read-cpu-ms-cost = 5.0
 	cfg := NewConfig()
 	meta, err := toml.Decode(cfgData, &cfg)
 	re.NoError(err)
-	err = cfg.Adjust(&meta, false)
+	err = cfg.Adjust(&meta)
 	re.NoError(err)
 
 	re.Equal(time.Second*2, cfg.Controller.DegradedModeWaitDuration.Duration)
diff --git a/pkg/mcs/resourcemanager/server/grpc_service.go b/pkg/mcs/resourcemanager/server/grpc_service.go
index cf985a14764..2f35042c48f 100644
--- a/pkg/mcs/resourcemanager/server/grpc_service.go
+++ b/pkg/mcs/resourcemanager/server/grpc_service.go
@@ -41,13 +41,13 @@ var (
 var _ rmpb.ResourceManagerServer = (*Service)(nil)
 
 // SetUpRestHandler is a hook to sets up the REST service.
-var SetUpRestHandler = func(srv *Service) (http.Handler, apiutil.APIServiceGroup) {
+var SetUpRestHandler = func(*Service) (http.Handler, apiutil.APIServiceGroup) {
 	return dummyRestService{}, apiutil.APIServiceGroup{}
 }
 
 type dummyRestService struct{}
 
-func (d dummyRestService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (dummyRestService) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
 	w.WriteHeader(http.StatusNotImplemented)
 	w.Write([]byte("not implemented"))
 }
@@ -94,7 +94,7 @@ func (s *Service) checkServing() error {
 }
 
 // GetResourceGroup implements ResourceManagerServer.GetResourceGroup.
-func (s *Service) GetResourceGroup(ctx context.Context, req *rmpb.GetResourceGroupRequest) (*rmpb.GetResourceGroupResponse, error) {
+func (s *Service) GetResourceGroup(_ context.Context, req *rmpb.GetResourceGroupRequest) (*rmpb.GetResourceGroupResponse, error) {
 	if err := s.checkServing(); err != nil {
 		return nil, err
 	}
@@ -108,7 +108,7 @@ func (s *Service) GetResourceGroup(ctx context.Context, req *rmpb.GetResourceGro
 }
 
 // ListResourceGroups implements ResourceManagerServer.ListResourceGroups.
-func (s *Service) ListResourceGroups(ctx context.Context, req *rmpb.ListResourceGroupsRequest) (*rmpb.ListResourceGroupsResponse, error) {
+func (s *Service) ListResourceGroups(_ context.Context, req *rmpb.ListResourceGroupsRequest) (*rmpb.ListResourceGroupsResponse, error) {
 	if err := s.checkServing(); err != nil {
 		return nil, err
 	}
@@ -123,7 +123,7 @@ func (s *Service) ListResourceGroups(ctx context.Context, req *rmpb.ListResource
 }
 
 // AddResourceGroup implements ResourceManagerServer.AddResourceGroup.
-func (s *Service) AddResourceGroup(ctx context.Context, req *rmpb.PutResourceGroupRequest) (*rmpb.PutResourceGroupResponse, error) {
+func (s *Service) AddResourceGroup(_ context.Context, req *rmpb.PutResourceGroupRequest) (*rmpb.PutResourceGroupResponse, error) {
 	if err := s.checkServing(); err != nil {
 		return nil, err
 	}
@@ -135,7 +135,7 @@ func (s *Service) AddResourceGroup(ctx context.Context, req *rmpb.PutResourceGro
 }
 
 // DeleteResourceGroup implements ResourceManagerServer.DeleteResourceGroup.
-func (s *Service) DeleteResourceGroup(ctx context.Context, req *rmpb.DeleteResourceGroupRequest) (*rmpb.DeleteResourceGroupResponse, error) {
+func (s *Service) DeleteResourceGroup(_ context.Context, req *rmpb.DeleteResourceGroupRequest) (*rmpb.DeleteResourceGroupResponse, error) {
 	if err := s.checkServing(); err != nil {
 		return nil, err
 	}
@@ -147,7 +147,7 @@ func (s *Service) DeleteResourceGroup(ctx context.Context, req *rmpb.DeleteResou
 }
 
 // ModifyResourceGroup implements ResourceManagerServer.ModifyResourceGroup.
-func (s *Service) ModifyResourceGroup(ctx context.Context, req *rmpb.PutResourceGroupRequest) (*rmpb.PutResourceGroupResponse, error) {
+func (s *Service) ModifyResourceGroup(_ context.Context, req *rmpb.PutResourceGroupRequest) (*rmpb.PutResourceGroupResponse, error) {
 	if err := s.checkServing(); err != nil {
 		return nil, err
 	}
diff --git a/pkg/mcs/scheduling/server/apis/v1/api.go b/pkg/mcs/scheduling/server/apis/v1/api.go
index 36451e5f031..be3277f3fc6 100644
--- a/pkg/mcs/scheduling/server/apis/v1/api.go
+++ b/pkg/mcs/scheduling/server/apis/v1/api.go
@@ -1292,7 +1292,7 @@ func scatterRegions(c *gin.Context) {
 		if !ok {
 			return 0, nil, errors.New("regions_id is invalid")
 		}
-		return handler.ScatterRegionsByID(ids, group, retryLimit, false)
+		return handler.ScatterRegionsByID(ids, group, retryLimit)
 	}()
 	if err != nil {
 		c.String(http.StatusInternalServerError, err.Error())
diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go
index 1b915b6874d..7ee7ae88cd1 100644
--- a/pkg/mcs/scheduling/server/cluster.go
+++ b/pkg/mcs/scheduling/server/cluster.go
@@ -473,7 +473,7 @@ func (c *Cluster) runMetricsCollectionJob() {
 		select {
 		case <-c.ctx.Done():
 			log.Info("metrics are reset")
-			c.resetMetrics()
+			resetMetrics()
 			log.Info("metrics collection job has been stopped")
 			return
 		case <-ticker.C:
@@ -487,7 +487,7 @@ func (c *Cluster) collectMetrics() {
 	stores := c.GetStores()
 	for _, s := range stores {
 		statsMap.Observe(s)
-		statsMap.ObserveHotStat(s, c.hotStat.StoresStats)
+		statistics.ObserveHotStat(s, c.hotStat.StoresStats)
 	}
 	statsMap.Collect()
@@ -504,7 +504,7 @@ func (c *Cluster) collectMetrics() {
 	c.RegionsInfo.CollectWaitLockMetrics()
 }
 
-func (c *Cluster) resetMetrics() {
+func resetMetrics() {
 	statistics.Reset()
 	schedulers.ResetSchedulerMetrics()
 	schedule.ResetHotSpotMetrics()
diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go
index 3e347afc12e..148a7015d11 100644
--- a/pkg/mcs/scheduling/server/config/config.go
+++ b/pkg/mcs/scheduling/server/config/config.go
@@ -294,7 +294,7 @@ func (o *PersistConfig) SetScheduleConfig(cfg *sc.ScheduleConfig) {
 }
 
 // AdjustScheduleCfg adjusts the schedule config during the initialization.
-func (o *PersistConfig) AdjustScheduleCfg(scheduleCfg *sc.ScheduleConfig) {
+func AdjustScheduleCfg(scheduleCfg *sc.ScheduleConfig) {
 	// In case we add new default schedulers.
 	for _, ps := range sc.DefaultSchedulers {
 		if slice.NoneOf(scheduleCfg.Schedulers, func(i int) bool {
@@ -374,7 +374,7 @@ func (o *PersistConfig) IsUseJointConsensus() bool {
 }
 
 // GetKeyType returns the key type.
-func (o *PersistConfig) GetKeyType() constant.KeyType {
+func (*PersistConfig) GetKeyType() constant.KeyType {
 	return constant.StringToKeyType("table")
 }
 
@@ -685,7 +685,7 @@ func (o *PersistConfig) SetSplitMergeInterval(splitMergeInterval time.Duration)
 }
 
 // SetHaltScheduling set HaltScheduling.
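`resetMetrics` and `AdjustScheduleCfg` above show the step beyond blanking a receiver: when a method touches no receiver state at all, it can become a plain package function, and call sites shrink from `c.resetMetrics()` to `resetMetrics()`. A tiny hedged sketch of that refactor (names hypothetical, not PD's metrics code):

```go
package cluster

import "sync/atomic"

// regionGauge stands in for the prometheus collectors the real code resets.
var regionGauge atomic.Int64

// Before: `func (c *Cluster) resetMetrics() { ... }` with the receiver unused.
// After: a free function with the same body and no misleading receiver.
func resetMetrics() {
	regionGauge.Store(0)
}
```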
-func (o *PersistConfig) SetHaltScheduling(halt bool, source string) {
+func (o *PersistConfig) SetHaltScheduling(halt bool, _ string) {
 	v := o.GetScheduleConfig().Clone()
 	v.HaltScheduling = halt
 	o.SetScheduleConfig(v)
@@ -735,25 +735,25 @@ func (o *PersistConfig) IsRaftKV2() bool {
 
 // AddSchedulerCfg adds the scheduler configurations.
 // This method is a no-op since we only use configurations derived from one-way synchronization from API server now.
-func (o *PersistConfig) AddSchedulerCfg(string, []string) {}
+func (*PersistConfig) AddSchedulerCfg(string, []string) {}
 
 // RemoveSchedulerCfg removes the scheduler configurations.
 // This method is a no-op since we only use configurations derived from one-way synchronization from API server now.
-func (o *PersistConfig) RemoveSchedulerCfg(tp string) {}
+func (*PersistConfig) RemoveSchedulerCfg(string) {}
 
 // CheckLabelProperty checks if the label property is satisfied.
-func (o *PersistConfig) CheckLabelProperty(typ string, labels []*metapb.StoreLabel) bool {
+func (*PersistConfig) CheckLabelProperty(string, []*metapb.StoreLabel) bool {
 	return false
 }
 
 // IsTraceRegionFlow returns if the region flow is tracing.
 // If the accuracy cannot reach 0.1 MB, it is considered not.
-func (o *PersistConfig) IsTraceRegionFlow() bool {
+func (*PersistConfig) IsTraceRegionFlow() bool {
 	return false
 }
 
 // Persist saves the configuration to the storage.
-func (o *PersistConfig) Persist(storage endpoint.ConfigStorage) error {
+func (*PersistConfig) Persist(endpoint.ConfigStorage) error {
 	return nil
 }
diff --git a/pkg/mcs/scheduling/server/config/watcher.go b/pkg/mcs/scheduling/server/config/watcher.go
index 8db5e656279..d1ca99bd36d 100644
--- a/pkg/mcs/scheduling/server/config/watcher.go
+++ b/pkg/mcs/scheduling/server/config/watcher.go
@@ -129,14 +129,14 @@ func (cw *Watcher) initializeConfigWatcher() error {
 			return err
 		}
 		log.Info("update scheduling config", zap.Reflect("new", cfg))
-		cw.AdjustScheduleCfg(&cfg.Schedule)
+		AdjustScheduleCfg(&cfg.Schedule)
 		cw.SetClusterVersion(&cfg.ClusterVersion)
 		cw.SetScheduleConfig(&cfg.Schedule)
 		cw.SetReplicationConfig(&cfg.Replication)
 		cw.SetStoreConfig(&cfg.Store)
 		return nil
 	}
-	deleteFn := func(kv *mvccpb.KeyValue) error {
+	deleteFn := func(*mvccpb.KeyValue) error {
 		return nil
 	}
 	cw.configWatcher = etcdutil.NewLoopWatcher(
diff --git a/pkg/mcs/scheduling/server/grpc_service.go b/pkg/mcs/scheduling/server/grpc_service.go
index ebce73e3303..62ec1c1118f 100644
--- a/pkg/mcs/scheduling/server/grpc_service.go
+++ b/pkg/mcs/scheduling/server/grpc_service.go
@@ -45,13 +45,13 @@ var (
 )
 
 // SetUpRestHandler is a hook to sets up the REST service.
-var SetUpRestHandler = func(srv *Service) (http.Handler, apiutil.APIServiceGroup) {
+var SetUpRestHandler = func(*Service) (http.Handler, apiutil.APIServiceGroup) {
 	return dummyRestService{}, apiutil.APIServiceGroup{}
 }
 
 type dummyRestService struct{}
 
-func (d dummyRestService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (dummyRestService) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
 	w.WriteHeader(http.StatusNotImplemented)
 	w.Write([]byte("not implemented"))
 }
@@ -169,7 +169,7 @@ func (s *Service) RegionHeartbeat(stream schedulingpb.Scheduling_RegionHeartbeat
 }
 
 // StoreHeartbeat implements gRPC SchedulingServer.
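The `SetUpRestHandler` variables touched above are injection hooks: each microservice ships a 501 stub, and the embedding binary can overwrite the variable to mount the real REST routes before start-up. The revive change only anonymizes the unused `srv` parameter; the hook pattern itself, reduced to its core (a sketch, with `Service` simplified away from PD's real type):

```go
package server

import "net/http"

// Service is a stand-in for the gRPC service the handler would wrap.
type Service struct{}

// SetUpRestHandler is a package-level hook; a hosting binary may replace
// it to serve real REST routes instead of this 501 stub.
var SetUpRestHandler = func(*Service) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusNotImplemented)
		_, _ = w.Write([]byte("not implemented"))
	})
}
```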
-func (s *Service) StoreHeartbeat(ctx context.Context, request *schedulingpb.StoreHeartbeatRequest) (*schedulingpb.StoreHeartbeatResponse, error) {
+func (s *Service) StoreHeartbeat(_ context.Context, request *schedulingpb.StoreHeartbeatRequest) (*schedulingpb.StoreHeartbeatResponse, error) {
 	c := s.GetCluster()
 	if c == nil {
 		// TODO: add metrics
@@ -203,7 +203,7 @@ func (s *Service) SplitRegions(ctx context.Context, request *schedulingpb.SplitR
 }
 
 // ScatterRegions implements gRPC SchedulingServer.
-func (s *Service) ScatterRegions(ctx context.Context, request *schedulingpb.ScatterRegionsRequest) (*schedulingpb.ScatterRegionsResponse, error) {
+func (s *Service) ScatterRegions(_ context.Context, request *schedulingpb.ScatterRegionsRequest) (*schedulingpb.ScatterRegionsResponse, error) {
 	c := s.GetCluster()
 	if c == nil {
 		return &schedulingpb.ScatterRegionsResponse{Header: s.notBootstrappedHeader()}, nil
@@ -235,7 +235,7 @@ func (s *Service) ScatterRegions(ctx context.Context, request *schedulingpb.Scat
 }
 
 // GetOperator gets information about the operator belonging to the specify region.
-func (s *Service) GetOperator(ctx context.Context, request *schedulingpb.GetOperatorRequest) (*schedulingpb.GetOperatorResponse, error) {
+func (s *Service) GetOperator(_ context.Context, request *schedulingpb.GetOperatorRequest) (*schedulingpb.GetOperatorResponse, error) {
 	c := s.GetCluster()
 	if c == nil {
 		return &schedulingpb.GetOperatorResponse{Header: s.notBootstrappedHeader()}, nil
@@ -262,7 +262,7 @@ func (s *Service) GetOperator(ctx context.Context, request *schedulingpb.GetOper
 }
 
 // AskBatchSplit implements gRPC SchedulingServer.
-func (s *Service) AskBatchSplit(ctx context.Context, request *schedulingpb.AskBatchSplitRequest) (*schedulingpb.AskBatchSplitResponse, error) {
+func (s *Service) AskBatchSplit(_ context.Context, request *schedulingpb.AskBatchSplitRequest) (*schedulingpb.AskBatchSplitResponse, error) {
 	c := s.GetCluster()
 	if c == nil {
 		return &schedulingpb.AskBatchSplitResponse{Header: s.notBootstrappedHeader()}, nil
diff --git a/pkg/mcs/scheduling/server/rule/watcher.go b/pkg/mcs/scheduling/server/rule/watcher.go
index d8a8dd3e609..ea90b9d4e49 100644
--- a/pkg/mcs/scheduling/server/rule/watcher.go
+++ b/pkg/mcs/scheduling/server/rule/watcher.go
@@ -109,7 +109,7 @@ func NewWatcher(
 
 func (rw *Watcher) initializeRuleWatcher() error {
 	var suspectKeyRanges *core.KeyRanges
-	preEventsFn := func(events []*clientv3.Event) error {
+	preEventsFn := func([]*clientv3.Event) error {
 		// It will be locked until the postEventsFn is finished.
 		rw.ruleManager.Lock()
 		rw.patch = rw.ruleManager.BeginPatch()
@@ -149,10 +149,9 @@ func (rw *Watcher) initializeRuleWatcher() error {
 				suspectKeyRanges.Append(rule.StartKey, rule.EndKey)
 			}
 			return nil
-		} else {
-			log.Warn("unknown key when updating placement rule", zap.String("key", key))
-			return nil
 		}
+		log.Warn("unknown key when updating placement rule", zap.String("key", key))
+		return nil
 	}
 	deleteFn := func(kv *mvccpb.KeyValue) error {
 		key := string(kv.Key)
@@ -181,12 +180,11 @@ func (rw *Watcher) initializeRuleWatcher() error {
 				suspectKeyRanges.Append(rule.StartKey, rule.EndKey)
 			}
 			return nil
-		} else {
-			log.Warn("unknown key when deleting placement rule", zap.String("key", key))
-			return nil
 		}
+		log.Warn("unknown key when deleting placement rule", zap.String("key", key))
+		return nil
 	}
-	postEventsFn := func(events []*clientv3.Event) error {
+	postEventsFn := func([]*clientv3.Event) error {
 		defer rw.ruleManager.Unlock()
 		if err := rw.ruleManager.TryCommitPatchLocked(rw.patch); err != nil {
 			log.Error("failed to commit patch", zap.Error(err))
@@ -213,7 +211,7 @@ func (rw *Watcher) initializeRuleWatcher() error {
 func (rw *Watcher) initializeRegionLabelWatcher() error {
 	prefixToTrim := rw.regionLabelPathPrefix + "/"
 	// TODO: use txn in region labeler.
-	preEventsFn := func(events []*clientv3.Event) error {
+	preEventsFn := func([]*clientv3.Event) error {
 		// It will be locked until the postEventsFn is finished.
 		rw.regionLabeler.Lock()
 		return nil
@@ -231,7 +229,7 @@ func (rw *Watcher) initializeRegionLabelWatcher() error {
 		log.Info("delete region label rule", zap.String("key", key))
 		return rw.regionLabeler.DeleteLabelRuleLocked(strings.TrimPrefix(key, prefixToTrim))
 	}
-	postEventsFn := func(events []*clientv3.Event) error {
+	postEventsFn := func([]*clientv3.Event) error {
 		defer rw.regionLabeler.Unlock()
 		rw.regionLabeler.BuildRangeListLocked()
 		return nil
diff --git a/pkg/mcs/server/server.go b/pkg/mcs/server/server.go
index 2c008e8f5e8..6aec799278c 100644
--- a/pkg/mcs/server/server.go
+++ b/pkg/mcs/server/server.go
@@ -171,7 +171,7 @@ func (bs *BaseServer) StartTimestamp() int64 {
 
 // CloseClientConns closes all client connections.
 func (bs *BaseServer) CloseClientConns() {
-	bs.clientConns.Range(func(key, value any) bool {
+	bs.clientConns.Range(func(_, value any) bool {
 		conn := value.(*grpc.ClientConn)
 		if err := conn.Close(); err != nil {
 			log.Error("close client connection meet error")
diff --git a/pkg/mcs/tso/server/config.go b/pkg/mcs/tso/server/config.go
index eedf3a2f1b1..c117dd72e38 100644
--- a/pkg/mcs/tso/server/config.go
+++ b/pkg/mcs/tso/server/config.go
@@ -177,11 +177,11 @@ func (c *Config) Parse(flagSet *pflag.FlagSet) error {
 	configutil.AdjustCommandLineString(flagSet, &c.ListenAddr, "listen-addr")
 	configutil.AdjustCommandLineString(flagSet, &c.AdvertiseListenAddr, "advertise-listen-addr")
 
-	return c.Adjust(meta, false)
+	return c.Adjust(meta)
 }
 
 // Adjust is used to adjust the TSO configurations.
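The watcher edits above also apply revive's `indent-error-flow`/`superfluous-else` rules: once every earlier branch returns, the trailing `else` is dropped and its body outdented, keeping the happy path at the left margin. The resulting shape, in isolation (a hypothetical helper, not the PD function):

```go
package watcher

import "log"

func handleKey(key string, known func(string) bool) error {
	if known(key) {
		// ... process the recognized key ...
		return nil
	}
	// Previously wrapped in `else { ... }`; since the if-branch returns,
	// the extra indentation was superfluous.
	log.Printf("unknown key when updating placement rule: %s", key)
	return nil
}
```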
-func (c *Config) Adjust(meta *toml.MetaData, reloading bool) error {
+func (c *Config) Adjust(meta *toml.MetaData) error {
 	configMetaData := configutil.NewConfigMetadata(meta)
 	if err := configMetaData.CheckUndecoded(); err != nil {
 		c.WarningMsgs = append(c.WarningMsgs, err.Error())
diff --git a/pkg/mcs/tso/server/config_test.go b/pkg/mcs/tso/server/config_test.go
index 9f5bc298964..2cb9c8e019a 100644
--- a/pkg/mcs/tso/server/config_test.go
+++ b/pkg/mcs/tso/server/config_test.go
@@ -83,7 +83,7 @@ max-gap-reset-ts = "1h"
 	cfg := NewConfig()
 	meta, err := toml.Decode(cfgData, &cfg)
 	re.NoError(err)
-	err = cfg.Adjust(&meta, false)
+	err = cfg.Adjust(&meta)
 	re.NoError(err)
 
 	re.Equal("tso-test-name", cfg.GetName())
diff --git a/pkg/mcs/tso/server/grpc_service.go b/pkg/mcs/tso/server/grpc_service.go
index 31a74f2a688..03250d9ed37 100644
--- a/pkg/mcs/tso/server/grpc_service.go
+++ b/pkg/mcs/tso/server/grpc_service.go
@@ -42,13 +42,13 @@ var (
 var _ tsopb.TSOServer = (*Service)(nil)
 
 // SetUpRestHandler is a hook to sets up the REST service.
-var SetUpRestHandler = func(srv *Service) (http.Handler, apiutil.APIServiceGroup) {
+var SetUpRestHandler = func(*Service) (http.Handler, apiutil.APIServiceGroup) {
 	return dummyRestService{}, apiutil.APIServiceGroup{}
 }
 
 type dummyRestService struct{}
 
-func (d dummyRestService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (dummyRestService) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
 	w.WriteHeader(http.StatusNotImplemented)
 	w.Write([]byte("not implemented"))
 }
@@ -135,7 +135,7 @@ func (s *Service) Tso(stream tsopb.TSO_TsoServer) error {
 
 // FindGroupByKeyspaceID returns the keyspace group that the keyspace belongs to.
 func (s *Service) FindGroupByKeyspaceID(
-	ctx context.Context, request *tsopb.FindGroupByKeyspaceIDRequest,
+	_ context.Context, request *tsopb.FindGroupByKeyspaceIDRequest,
 ) (*tsopb.FindGroupByKeyspaceIDResponse, error) {
 	respKeyspaceGroup := request.GetHeader().GetKeyspaceGroupId()
 	if errorType, err := s.validRequest(request.GetHeader()); err != nil {
@@ -189,7 +189,7 @@ func (s *Service) FindGroupByKeyspaceID(
 // GetMinTS gets the minimum timestamp across all keyspace groups served by the TSO server
 // who receives and handles the request.
 func (s *Service) GetMinTS(
-	ctx context.Context, request *tsopb.GetMinTSRequest,
+	_ context.Context, request *tsopb.GetMinTSRequest,
 ) (*tsopb.GetMinTSResponse, error) {
 	respKeyspaceGroup := request.GetHeader().GetKeyspaceGroupId()
 	if errorType, err := s.validRequest(request.GetHeader()); err != nil {
diff --git a/pkg/mcs/tso/server/server.go b/pkg/mcs/tso/server/server.go
index f5f46a29504..c38c7142730 100644
--- a/pkg/mcs/tso/server/server.go
+++ b/pkg/mcs/tso/server/server.go
@@ -250,7 +250,7 @@ func (s *Server) ResignPrimary(keyspaceID, keyspaceGroupID uint32) error {
 
 // AddServiceReadyCallback implements basicserver.
 // It adds callbacks when it's ready for providing tso service.
-func (s *Server) AddServiceReadyCallback(callbacks ...func(context.Context) error) {
+func (*Server) AddServiceReadyCallback(...func(context.Context) error) {
 	// Do nothing here. The primary of each keyspace group assigned to this host
 	// will respond to the requests accordingly.
 }
@@ -278,7 +278,7 @@ func (s *Server) GetTSOAllocatorManager(keyspaceGroupID uint32) (*tso.AllocatorM
 }
 
 // IsLocalRequest checks if the forwarded host is the current host
-func (s *Server) IsLocalRequest(forwardedHost string) bool {
+func (*Server) IsLocalRequest(forwardedHost string) bool {
 	// TODO: Check if the forwarded host is the current host.
 	// The logic is depending on etcd service mode -- if the TSO service
 	// uses the embedded etcd, check against ClientUrls; otherwise check
@@ -310,13 +310,13 @@ func (s *Server) ValidateRequest(header *tsopb.RequestHeader) error {
 
 // GetExternalTS returns external timestamp from the cache or the persistent storage.
 // TODO: Implement GetExternalTS
-func (s *Server) GetExternalTS() uint64 {
+func (*Server) GetExternalTS() uint64 {
 	return 0
 }
 
 // SetExternalTS saves external timestamp to cache and the persistent storage.
 // TODO: Implement SetExternalTS
-func (s *Server) SetExternalTS(externalTS uint64) error {
+func (*Server) SetExternalTS(uint64) error {
 	return nil
 }
diff --git a/pkg/member/participant.go b/pkg/member/participant.go
index 0bf3bcc547e..8a0ffadd31e 100644
--- a/pkg/member/participant.go
+++ b/pkg/member/participant.go
@@ -200,7 +200,7 @@ func (m *Participant) KeepLeader(ctx context.Context) {
 
 // PreCheckLeader does some pre-check before checking whether or not it's the leader.
 // It returns true if it passes the pre-check, false otherwise.
-func (m *Participant) PreCheckLeader() error {
+func (*Participant) PreCheckLeader() error {
 	// No specific thing to check. Returns no error.
 	return nil
 }
@@ -280,7 +280,7 @@ func (m *Participant) IsSameLeader(leader participant) bool {
 }
 
 // CheckPriority checks whether there is another participant has higher priority and resign it as the leader if so.
-func (m *Participant) CheckPriority(ctx context.Context) {
+func (*Participant) CheckPriority(_ context.Context) {
 	// TODO: implement weighted-election when it's in need
 }
diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go
index 6cf7ae143df..e5b3e39a502 100644
--- a/pkg/mock/mockcluster/mockcluster.go
+++ b/pkg/mock/mockcluster/mockcluster.go
@@ -123,7 +123,7 @@ func (mc *Cluster) AllocID() (uint64, error) {
 }
 
 // UpdateRegionsLabelLevelStats updates the label level stats for the regions.
-func (mc *Cluster) UpdateRegionsLabelLevelStats(regions []*core.RegionInfo) {}
+func (*Cluster) UpdateRegionsLabelLevelStats(_ []*core.RegionInfo) {}
 
 // LoadRegion puts region info without leader
 func (mc *Cluster) LoadRegion(regionID uint64, peerStoreIDs ...uint64) {
diff --git a/pkg/mock/mockhbstream/mockhbstream.go b/pkg/mock/mockhbstream/mockhbstream.go
index 289f31d63dd..ac8f246f86a 100644
--- a/pkg/mock/mockhbstream/mockhbstream.go
+++ b/pkg/mock/mockhbstream/mockhbstream.go
@@ -46,10 +46,10 @@ func (s HeartbeatStream) Send(m core.RegionHeartbeatResponse) error {
 }
 
 // SendMsg is used to send the message.
-func (s HeartbeatStream) SendMsg(region *core.RegionInfo, msg *pdpb.RegionHeartbeatResponse) {}
+func (HeartbeatStream) SendMsg(*core.RegionInfo, *pdpb.RegionHeartbeatResponse) {}
 
 // BindStream mock method.
-func (s HeartbeatStream) BindStream(storeID uint64, stream hbstream.HeartbeatStream) {}
+func (HeartbeatStream) BindStream(uint64, hbstream.HeartbeatStream) {}
 
 // Recv mocks method.
 func (s HeartbeatStream) Recv() core.RegionHeartbeatResponse {
diff --git a/pkg/mock/mockhbstream/mockhbstream_test.go b/pkg/mock/mockhbstream/mockhbstream_test.go
index a8e88f61aee..aa1ca85279b 100644
--- a/pkg/mock/mockhbstream/mockhbstream_test.go
+++ b/pkg/mock/mockhbstream/mockhbstream_test.go
@@ -29,7 +29,6 @@ import (
 )
 
 func TestActivity(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
diff --git a/pkg/mock/mockid/mockid.go b/pkg/mock/mockid/mockid.go
index 4c0e7540653..7b4902a6a04 100644
--- a/pkg/mock/mockid/mockid.go
+++ b/pkg/mock/mockid/mockid.go
@@ -38,6 +38,6 @@ func (alloc *IDAllocator) SetBase(newBase uint64) error {
 }
 
 // Rebase implements the IDAllocator interface.
-func (alloc *IDAllocator) Rebase() error {
+func (*IDAllocator) Rebase() error {
 	return nil
 }
diff --git a/pkg/movingaverage/avg_over_time_test.go b/pkg/movingaverage/avg_over_time_test.go
index 43553d9d608..4a54e33d449 100644
--- a/pkg/movingaverage/avg_over_time_test.go
+++ b/pkg/movingaverage/avg_over_time_test.go
@@ -23,7 +23,6 @@ import (
 )
 
 func TestPulse(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	aot := NewAvgOverTime(5 * time.Second)
 	// warm up
@@ -43,7 +42,6 @@ func TestPulse(t *testing.T) {
 }
 
 func TestPulse2(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	dur := 5 * time.Second
 	aot := NewAvgOverTime(dur)
@@ -57,7 +55,6 @@ func TestPulse2(t *testing.T) {
 }
 
 func TestChange(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	aot := NewAvgOverTime(5 * time.Second)
@@ -91,7 +88,6 @@ func TestChange(t *testing.T) {
 }
 
 func TestMinFilled(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	interval := 10 * time.Second
 	rate := 1.0
@@ -108,7 +104,6 @@ func TestMinFilled(t *testing.T) {
 }
 
 func TestUnstableInterval(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	aot := NewAvgOverTime(5 * time.Second)
 	re.Equal(0., aot.Get())
diff --git a/pkg/movingaverage/max_filter_test.go b/pkg/movingaverage/max_filter_test.go
index bba770cecc2..7d3906ec93c 100644
--- a/pkg/movingaverage/max_filter_test.go
+++ b/pkg/movingaverage/max_filter_test.go
@@ -21,7 +21,6 @@ import (
 )
 
 func TestMaxFilter(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	var empty float64 = 0
 	data := []float64{2, 1, 3, 4, 1, 1, 3, 3, 2, 0, 5}
diff --git a/pkg/movingaverage/moving_average_test.go b/pkg/movingaverage/moving_average_test.go
index 49c20637c20..fd0a1a9fcf3 100644
--- a/pkg/movingaverage/moving_average_test.go
+++ b/pkg/movingaverage/moving_average_test.go
@@ -72,7 +72,6 @@ func checkInstantaneous(re *require.Assertions, ma MovingAvg) {
 }
 
 func TestMedianFilter(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	var empty float64 = 0
 	data := []float64{2, 4, 2, 800, 600, 6, 3}
@@ -92,7 +91,6 @@ type testCase struct {
 }
 
 func TestMovingAvg(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	var empty float64 = 0
 	data := []float64{1, 1, 1, 1, 5, 1, 1, 1}
diff --git a/pkg/movingaverage/weight_allocator_test.go b/pkg/movingaverage/weight_allocator_test.go
index 631a71f10c9..405d8f72876 100644
--- a/pkg/movingaverage/weight_allocator_test.go
+++ b/pkg/movingaverage/weight_allocator_test.go
@@ -21,7 +21,6 @@ import (
 )
 
 func TestWeightAllocator(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 
 	checkSumFunc := func(wa *WeightAllocator, length int) {
diff --git a/pkg/ratelimit/concurrency_limiter_test.go b/pkg/ratelimit/concurrency_limiter_test.go
index e77c79c8ebc..a397b6ac50f 100644
--- a/pkg/ratelimit/concurrency_limiter_test.go
+++ b/pkg/ratelimit/concurrency_limiter_test.go
@@ -26,7 +26,6 @@ import (
 )
 
 func TestConcurrencyLimiter(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	cl := NewConcurrencyLimiter(10)
 	for i := 0; i < 10; i++ {
diff --git a/pkg/ratelimit/controller_test.go b/pkg/ratelimit/controller_test.go
index 48a5ee2054b..d4093555ba7 100644
--- a/pkg/ratelimit/controller_test.go
+++ b/pkg/ratelimit/controller_test.go
@@ -78,7 +78,6 @@ func runMulitLabelLimiter(t *testing.T, limiter *Controller, testCase []labelCas
 }
 
 func TestControllerWithConcurrencyLimiter(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	limiter := NewController(context.Background(), "grpc", nil)
 	defer limiter.Close()
@@ -109,7 +108,7 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) {
 				status := limiter.Update(label, o)
 				re.NotZero(status & ConcurrencyNoChange)
 			},
-			checkStatusFunc: func(label string) {},
+			checkStatusFunc: func(_ string) {},
 		},
 		{
 			opt: UpdateConcurrencyLimiter(5),
@@ -191,7 +190,6 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) {
 }
 
 func TestBlockList(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	opts := []Option{AddLabelAllowList()}
 	limiter := NewController(context.Background(), "grpc", nil)
@@ -213,7 +211,6 @@ func TestBlockList(t *testing.T) {
 }
 
 func TestControllerWithQPSLimiter(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	limiter := NewController(context.Background(), "grpc", nil)
 	defer limiter.Close()
@@ -243,7 +240,7 @@ func TestControllerWithQPSLimiter(t *testing.T) {
 				status := limiter.Update(label, o)
 				re.NotZero(status & QPSNoChange)
 			},
-			checkStatusFunc: func(label string) {},
+			checkStatusFunc: func(_ string) {},
 		},
 		{
 			opt: UpdateQPSLimiter(5, 5),
@@ -323,7 +320,6 @@ func TestControllerWithQPSLimiter(t *testing.T) {
 }
 
 func TestControllerWithTwoLimiters(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	limiter := NewController(context.Background(), "grpc", nil)
 	defer limiter.Close()
diff --git a/pkg/ratelimit/limiter_test.go b/pkg/ratelimit/limiter_test.go
index fabb9d98917..36f339b47ac 100644
--- a/pkg/ratelimit/limiter_test.go
+++ b/pkg/ratelimit/limiter_test.go
@@ -40,7 +40,6 @@ func (r *releaseUtil) append(d DoneFunc) {
 }
 
 func TestWithConcurrencyLimiter(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 
 	limiter := newLimiter()
@@ -103,7 +102,6 @@ func TestWithConcurrencyLimiter(t *testing.T) {
 }
 
 func TestWithQPSLimiter(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	limiter := newLimiter()
 	status := limiter.updateQPSConfig(float64(rate.Every(time.Second)), 1)
@@ -177,7 +175,6 @@ func TestWithQPSLimiter(t *testing.T) {
 }
 
 func TestWithTwoLimiters(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 	cfg := &DimensionConfig{
 		QPS: 100,
diff --git a/pkg/ratelimit/ratelimiter_test.go b/pkg/ratelimit/ratelimiter_test.go
index 35b355e7b21..f16bb6a83d2 100644
--- a/pkg/ratelimit/ratelimiter_test.go
+++ b/pkg/ratelimit/ratelimiter_test.go
@@ -22,7 +22,6 @@ import (
 )
 
 func TestRateLimiter(t *testing.T) {
-	t.Parallel()
 	re := require.New(t)
 
 	limiter := NewRateLimiter(100, 100)
diff --git a/pkg/ratelimit/runner.go b/pkg/ratelimit/runner.go
index 661668af3b9..dd92a10179d 100644
--- a/pkg/ratelimit/runner.go
+++ b/pkg/ratelimit/runner.go
@@ -162,7 +162,7 @@ func NewSyncRunner() *SyncRunner {
 }
 
 // RunTask runs the task synchronously.
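The QPS side of these ratelimit tests is built on `golang.org/x/time/rate`; `rate.Every(interval)` converts a minimum spacing between events into a `rate.Limit`, which is why the tests pass `float64(rate.Every(time.Second))` into the limiter config. A self-contained taste of that API, independent of PD's wrappers:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One token per 100ms with a burst of 1: roughly 10 QPS.
	l := rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
	start := time.Now()
	for i := 0; i < 3; i++ {
		// Wait blocks until a token is available (or the context ends).
		if err := l.Wait(context.Background()); err != nil {
			panic(err)
		}
	}
	fmt.Println("3 permits took", time.Since(start).Round(10*time.Millisecond))
}
```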
-func (s *SyncRunner) RunTask(ctx context.Context, opt TaskOpts, f func(context.Context)) error {
+func (*SyncRunner) RunTask(ctx context.Context, _ TaskOpts, f func(context.Context)) error {
 	f(ctx)
 	return nil
 }
diff --git a/pkg/ratelimit/runner_test.go b/pkg/ratelimit/runner_test.go
index 8a9eff77379..9b8dca231d1 100644
--- a/pkg/ratelimit/runner_test.go
+++ b/pkg/ratelimit/runner_test.go
@@ -36,7 +36,7 @@ func TestAsyncRunner(t *testing.T) {
 		err := runner.RunTask(context.Background(), TaskOpts{
 			TaskName: "test1",
 			Limit:    limiter,
-		}, func(ctx context.Context) {
+		}, func(context.Context) {
 			defer wg.Done()
 			time.Sleep(100 * time.Millisecond)
 		})
@@ -55,7 +55,7 @@ func TestAsyncRunner(t *testing.T) {
 		err := runner.RunTask(context.Background(), TaskOpts{
 			TaskName: "test2",
 			Limit:    limiter,
-		}, func(ctx context.Context) {
+		}, func(context.Context) {
 			defer wg.Done()
 			time.Sleep(100 * time.Millisecond)
 		})
diff --git a/pkg/replication/replication_mode_test.go b/pkg/replication/replication_mode_test.go
index 038807d7d94..d19a4f70d66 100644
--- a/pkg/replication/replication_mode_test.go
+++ b/pkg/replication/replication_mode_test.go
@@ -144,7 +144,7 @@ func (rep *mockFileReplicator) GetMembers() ([]*pdpb.Member, error) {
 	return members, nil
 }
 
-func (rep *mockFileReplicator) ReplicateFileToMember(ctx context.Context, member *pdpb.Member, name string, data []byte) error {
+func (rep *mockFileReplicator) ReplicateFileToMember(_ context.Context, member *pdpb.Member, _ string, data []byte) error {
 	if err := rep.errors[member.GetMemberId()]; err != nil {
 		return err
 	}
diff --git a/pkg/schedule/checker/merge_checker.go b/pkg/schedule/checker/merge_checker.go
index 1ce7bddd1dc..821c21cc119 100644
--- a/pkg/schedule/checker/merge_checker.go
+++ b/pkg/schedule/checker/merge_checker.go
@@ -94,7 +94,7 @@ func NewMergeChecker(ctx context.Context, cluster sche.CheckerCluster, conf conf
 }
 
 // GetType return MergeChecker's type
-func (m *MergeChecker) GetType() string {
+func (*MergeChecker) GetType() string {
 	return "merge-checker"
 }
diff --git a/pkg/schedule/checker/replica_checker.go b/pkg/schedule/checker/replica_checker.go
index 3e23f3bdcac..6324fd2ca10 100644
--- a/pkg/schedule/checker/replica_checker.go
+++ b/pkg/schedule/checker/replica_checker.go
@@ -76,7 +76,7 @@ func NewReplicaChecker(cluster sche.CheckerCluster, conf config.CheckerConfigPro
 }
 
 // GetType return ReplicaChecker's type
-func (r *ReplicaChecker) GetType() string {
+func (*ReplicaChecker) GetType() string {
 	return replicaCheckerName
 }
diff --git a/pkg/schedule/checker/replica_strategy.go b/pkg/schedule/checker/replica_strategy.go
index fdf05a0c479..e234189fe96 100644
--- a/pkg/schedule/checker/replica_strategy.go
+++ b/pkg/schedule/checker/replica_strategy.go
@@ -97,8 +97,13 @@ func (s *ReplicaStrategy) SelectStoreToFix(coLocationStores []*core.StoreInfo, o
 		return 0, false
 	}
 	// trick to avoid creating a slice with `old` removed.
-	s.swapStoreToFirst(coLocationStores, old)
-	return s.SelectStoreToAdd(coLocationStores[1:])
+	swapStoreToFirst(coLocationStores, old)
+	// If the coLocationStores only has one store, no need to remove.
+	// Otherwise, the other stores will be filtered.
+	if len(coLocationStores) > 1 {
+		coLocationStores = coLocationStores[1:]
+	}
+	return s.SelectStoreToAdd(coLocationStores)
 }
 
 // SelectStoreToImprove returns a store to replace oldStore. The location
@@ -108,7 +113,7 @@ func (s *ReplicaStrategy) SelectStoreToImprove(coLocationStores []*core.StoreInf
 		return 0, false
 	}
 	// trick to avoid creating a slice with `old` removed.
-	s.swapStoreToFirst(coLocationStores, old)
+	swapStoreToFirst(coLocationStores, old)
 	oldStore := s.cluster.GetStore(old)
 	if oldStore == nil {
 		return 0, false
@@ -122,7 +127,7 @@ func (s *ReplicaStrategy) SelectStoreToImprove(coLocationStores []*core.StoreInf
 	return s.SelectStoreToAdd(coLocationStores[1:], filters...)
 }
 
-func (s *ReplicaStrategy) swapStoreToFirst(stores []*core.StoreInfo, id uint64) {
+func swapStoreToFirst(stores []*core.StoreInfo, id uint64) {
 	for i, s := range stores {
 		if s.GetID() == id {
 			stores[0], stores[i] = stores[i], stores[0]
diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go
index 464f5e97be8..66b958911b1 100644
--- a/pkg/schedule/checker/rule_checker.go
+++ b/pkg/schedule/checker/rule_checker.go
@@ -107,7 +107,7 @@ func NewRuleChecker(ctx context.Context, cluster sche.CheckerCluster, ruleManage
 }
 
 // GetType returns RuleChecker's Type
-func (c *RuleChecker) GetType() string {
+func (*RuleChecker) GetType() string {
 	return ruleCheckerName
 }
@@ -347,7 +347,7 @@ func (c *RuleChecker) fixLooseMatchPeer(region *core.RegionInfo, fit *placement.
 	if region.GetLeader().GetId() != peer.GetId() && rf.Rule.Role == placement.Leader {
 		ruleCheckerFixLeaderRoleCounter.Inc()
 		if c.allowLeader(fit, peer) {
-			return operator.CreateTransferLeaderOperator("fix-leader-role", c.cluster, region, region.GetLeader().GetStoreId(), peer.GetStoreId(), []uint64{}, 0)
+			return operator.CreateTransferLeaderOperator("fix-leader-role", c.cluster, region, peer.GetStoreId(), []uint64{}, 0)
 		}
 		ruleCheckerNotAllowLeaderCounter.Inc()
 		return nil, errPeerCannotBeLeader
@@ -356,7 +356,7 @@ func (c *RuleChecker) fixLooseMatchPeer(region *core.RegionInfo, fit *placement.
 		ruleCheckerFixFollowerRoleCounter.Inc()
 		for _, p := range region.GetPeers() {
 			if c.allowLeader(fit, p) {
-				return operator.CreateTransferLeaderOperator("fix-follower-role", c.cluster, region, peer.GetStoreId(), p.GetStoreId(), []uint64{}, 0)
+				return operator.CreateTransferLeaderOperator("fix-follower-role", c.cluster, region, p.GetStoreId(), []uint64{}, 0)
 			}
 		}
 		ruleCheckerNoNewLeaderCounter.Inc()
diff --git a/pkg/schedule/checker/rule_checker_test.go b/pkg/schedule/checker/rule_checker_test.go
index ccd6abdc098..e69b956134b 100644
--- a/pkg/schedule/checker/rule_checker_test.go
+++ b/pkg/schedule/checker/rule_checker_test.go
@@ -2112,3 +2112,63 @@ func (suite *ruleCheckerTestSuite) TestRemoveOrphanPeer() {
 	suite.NotNil(op)
 	suite.Equal("remove-orphan-peer", op.Desc())
 }
+
+func (suite *ruleCheckerTestSuite) TestIssue7808() {
+	re := suite.Require()
+	suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1", "disk_type": "mix"})
+	suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host2", "disk_type": "mix"})
+	suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host3", "disk_type": "ssd"})
+	suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4", "disk_type": "ssd"})
+	suite.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5", "disk_type": "ssd"})
+	suite.cluster.AddLeaderRegionWithRange(1, "", "", 3, 4, 1)
+	err := suite.ruleManager.SetRules([]*placement.Rule{
+		{
+			GroupID: "pd",
+			ID:      "1",
+			Role:    placement.Voter,
+			Count:   2,
+			LabelConstraints: []placement.LabelConstraint{
+				{
+					Key: "disk_type",
+					Values: []string{
+						"ssd",
+					},
+					Op: placement.In,
+				},
+			},
+			LocationLabels: []string{"host"},
+			IsolationLevel: "host",
+		},
+		{
+			GroupID: "pd",
+			ID:      "2",
+			Role:    placement.Follower,
+			Count:   1,
+			LabelConstraints: []placement.LabelConstraint{
+				{
+					Key: "disk_type",
+					Values: []string{
+						"mix",
+					},
+					Op: placement.In,
+				},
+			},
+			LocationLabels: []string{"host"},
+			IsolationLevel: "host",
+		},
+	})
+	re.NoError(err)
+	err = suite.ruleManager.DeleteRule(placement.DefaultGroupID, placement.DefaultRuleID)
+	re.NoError(err)
+	suite.cluster.SetStoreDown(1)
+	region := suite.cluster.GetRegion(1)
+	downPeer := []*pdpb.PeerStats{
+		{Peer: region.GetStorePeer(1), DownSeconds: 6000},
+	}
+	region = region.Clone(core.WithDownPeers(downPeer))
+	suite.cluster.PutRegion(region)
+	op := suite.rc.Check(suite.cluster.GetRegion(1))
+	re.NotNil(op)
+	re.Equal("fast-replace-rule-down-peer", op.Desc())
+	re.Contains(op.Brief(), "mv peer: store [1] to [2]")
+}
diff --git a/pkg/schedule/checker/split_checker.go b/pkg/schedule/checker/split_checker.go
index 072bdcf7a2e..3a34eee8c90 100644
--- a/pkg/schedule/checker/split_checker.go
+++ b/pkg/schedule/checker/split_checker.go
@@ -51,7 +51,7 @@ func NewSplitChecker(cluster sche.CheckerCluster, ruleManager *placement.RuleMan
 }
 
 // GetType returns the checker type.
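`TestIssue7808` above reproduces the bug with two placement rules: two voters pinned to `disk_type=ssd` stores and one follower on a `disk_type=mix` store, each isolated by host, and it then expects the down peer on store 1 to be replaced by store 2. Outside the test suite the same intent is usually expressed as placement-rule JSON; the sketch below mirrors the test's rules with a local struct (the JSON field names follow PD's placement-rule format as best I understand it):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the placement-rule fields used in the test; the real
// types live in pkg/schedule/placement.
type labelConstraint struct {
	Key    string   `json:"key"`
	Op     string   `json:"op"`
	Values []string `json:"values"`
}

type rule struct {
	GroupID          string            `json:"group_id"`
	ID               string            `json:"id"`
	Role             string            `json:"role"`
	Count            int               `json:"count"`
	LabelConstraints []labelConstraint `json:"label_constraints"`
	LocationLabels   []string          `json:"location_labels"`
	IsolationLevel   string            `json:"isolation_level"`
}

func main() {
	rules := []rule{
		{GroupID: "pd", ID: "1", Role: "voter", Count: 2,
			LabelConstraints: []labelConstraint{{Key: "disk_type", Op: "in", Values: []string{"ssd"}}},
			LocationLabels:   []string{"host"}, IsolationLevel: "host"},
		{GroupID: "pd", ID: "2", Role: "follower", Count: 1,
			LabelConstraints: []labelConstraint{{Key: "disk_type", Op: "in", Values: []string{"mix"}}},
			LocationLabels:   []string{"host"}, IsolationLevel: "host"},
	}
	out, _ := json.MarshalIndent(rules, "", "  ")
	fmt.Println(string(out))
}
```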
-func (c *SplitChecker) GetType() string { +func (*SplitChecker) GetType() string { return "split-checker" } diff --git a/pkg/schedule/config/config.go b/pkg/schedule/config/config.go index 56038ddcb09..abf4c776f8a 100644 --- a/pkg/schedule/config/config.go +++ b/pkg/schedule/config/config.go @@ -407,7 +407,7 @@ func (c *ScheduleConfig) Adjust(meta *configutil.ConfigMetaData, reloading bool) adjustSchedulers(&c.Schedulers, DefaultSchedulers) for k, b := range c.migrateConfigurationMap() { - v, err := c.parseDeprecatedFlag(meta, k, *b[0], *b[1]) + v, err := parseDeprecatedFlag(meta, k, *b[0], *b[1]) if err != nil { return err } @@ -456,7 +456,7 @@ func (c *ScheduleConfig) GetMaxMergeRegionKeys() uint64 { return c.MaxMergeRegionSize * 10000 } -func (c *ScheduleConfig) parseDeprecatedFlag(meta *configutil.ConfigMetaData, name string, old, new bool) (bool, error) { +func parseDeprecatedFlag(meta *configutil.ConfigMetaData, name string, old, new bool) (bool, error) { oldName, newName := "disable-"+name, "enable-"+name defineOld, defineNew := meta.IsDefined(oldName), meta.IsDefined(newName) switch { diff --git a/pkg/schedule/filter/candidates_test.go b/pkg/schedule/filter/candidates_test.go index 13e8ed661cc..0d805312ba7 100644 --- a/pkg/schedule/filter/candidates_test.go +++ b/pkg/schedule/filter/candidates_test.go @@ -48,9 +48,9 @@ func idComparer2(a, b *core.StoreInfo) int { type idFilter func(uint64) bool -func (f idFilter) Scope() string { return "idFilter" } -func (f idFilter) Type() filterType { return filterType(0) } -func (f idFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (idFilter) Scope() string { return "idFilter" } +func (idFilter) Type() filterType { return filterType(0) } +func (f idFilter) Source(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if f(store.GetID()) { return statusOK } @@ -58,7 +58,7 @@ func (f idFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo return statusStoreScoreDisallowed } -func (f idFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f idFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if f(store.GetID()) { return statusOK } diff --git a/pkg/schedule/filter/filters.go b/pkg/schedule/filter/filters.go index 0d188e69180..1838f0104f4 100644 --- a/pkg/schedule/filter/filters.go +++ b/pkg/schedule/filter/filters.go @@ -185,18 +185,18 @@ func (f *excludedFilter) Scope() string { return f.scope } -func (f *excludedFilter) Type() filterType { +func (*excludedFilter) Type() filterType { return excluded } -func (f *excludedFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f *excludedFilter) Source(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if _, ok := f.sources[store.GetID()]; ok { return statusStoreAlreadyHasPeer } return statusOK } -func (f *excludedFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f *excludedFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if _, ok := f.targets[store.GetID()]; ok { return statusStoreAlreadyHasPeer } @@ -215,15 +215,15 @@ func (f *storageThresholdFilter) Scope() string { return f.scope } -func (f *storageThresholdFilter) Type() filterType { +func (*storageThresholdFilter) Type() filterType { return storageThreshold } -func (f *storageThresholdFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) 
*plan.Status { +func (*storageThresholdFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } -func (f *storageThresholdFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (*storageThresholdFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if !store.IsLowSpace(conf.GetLowSpaceRatio()) { return statusOK } @@ -283,11 +283,11 @@ func (f *distinctScoreFilter) Scope() string { return f.scope } -func (f *distinctScoreFilter) Type() filterType { +func (*distinctScoreFilter) Type() filterType { return distinctScore } -func (f *distinctScoreFilter) Source(_ config.SharedConfigProvider, _ *core.StoreInfo) *plan.Status { +func (*distinctScoreFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } @@ -387,7 +387,7 @@ func (f *StoreStateFilter) pauseLeaderTransfer(_ config.SharedConfigProvider, st return statusOK } -func (f *StoreStateFilter) slowStoreEvicted(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) slowStoreEvicted(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if store.EvictedAsSlowStore() { f.Reason = storeStateSlow return statusStoreRejectLeader @@ -583,12 +583,12 @@ func (f labelConstraintFilter) Scope() string { } // Type returns the name of the filter. -func (f labelConstraintFilter) Type() filterType { +func (labelConstraintFilter) Type() filterType { return labelConstraint } // Source filters stores when select them as schedule source. -func (f labelConstraintFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f labelConstraintFilter) Source(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if placement.MatchLabelConstraints(store, f.constraints) { return statusOK } @@ -634,11 +634,11 @@ func (f *ruleFitFilter) Scope() string { return f.scope } -func (f *ruleFitFilter) Type() filterType { +func (*ruleFitFilter) Type() filterType { return ruleFit } -func (f *ruleFitFilter) Source(_ config.SharedConfigProvider, _ *core.StoreInfo) *plan.Status { +func (*ruleFitFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } @@ -687,11 +687,11 @@ func (f *ruleLeaderFitFilter) Scope() string { return f.scope } -func (f *ruleLeaderFitFilter) Type() filterType { +func (*ruleLeaderFitFilter) Type() filterType { return ruleLeader } -func (f *ruleLeaderFitFilter) Source(_ config.SharedConfigProvider, _ *core.StoreInfo) *plan.Status { +func (*ruleLeaderFitFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } @@ -743,11 +743,11 @@ func (f *ruleWitnessFitFilter) Scope() string { return f.scope } -func (f *ruleWitnessFitFilter) Type() filterType { +func (*ruleWitnessFitFilter) Type() filterType { return ruleFit } -func (f *ruleWitnessFitFilter) Source(_ config.SharedConfigProvider, _ *core.StoreInfo) *plan.Status { +func (*ruleWitnessFitFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } @@ -815,7 +815,7 @@ func (f *engineFilter) Scope() string { return f.scope } -func (f *engineFilter) Type() filterType { +func (*engineFilter) Type() filterType { return engine } @@ -858,7 +858,7 @@ func (f *specialUseFilter) Scope() string { return f.scope } -func (f *specialUseFilter) Type() filterType { +func (*specialUseFilter) Type() filterType { return specialUse } @@ -869,7 +869,7 @@ func (f *specialUseFilter) 
Source(conf config.SharedConfigProvider, store *core. return statusStoreNotMatchRule } -func (f *specialUseFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f *specialUseFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if !f.constraint.MatchStore(store) { return statusOK } @@ -932,11 +932,11 @@ func (f *isolationFilter) Scope() string { return f.scope } -func (f *isolationFilter) Type() filterType { +func (*isolationFilter) Type() filterType { return isolation } -func (f *isolationFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (*isolationFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } diff --git a/pkg/schedule/filter/region_filters.go b/pkg/schedule/filter/region_filters.go index 799cee7d90c..7cd015412c2 100644 --- a/pkg/schedule/filter/region_filters.go +++ b/pkg/schedule/filter/region_filters.go @@ -76,7 +76,7 @@ func NewRegionPendingFilter() RegionFilter { return ®ionPendingFilter{} } -func (f *regionPendingFilter) Select(region *core.RegionInfo) *plan.Status { +func (*regionPendingFilter) Select(region *core.RegionInfo) *plan.Status { if hasPendingPeers(region) { return statusRegionPendingPeer } @@ -91,7 +91,7 @@ func NewRegionDownFilter() RegionFilter { return ®ionDownFilter{} } -func (f *regionDownFilter) Select(region *core.RegionInfo) *plan.Status { +func (*regionDownFilter) Select(region *core.RegionInfo) *plan.Status { if hasDownPeers(region) { return statusRegionDownPeer } diff --git a/pkg/schedule/handler/handler.go b/pkg/schedule/handler/handler.go index a9b89e4e3a4..0541a2d6567 100644 --- a/pkg/schedule/handler/handler.go +++ b/pkg/schedule/handler/handler.go @@ -417,7 +417,7 @@ func (h *Handler) AddTransferLeaderOperator(regionID uint64, storeID uint64) err return errors.Errorf("region has no voter in store %v", storeID) } - op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region, region.GetLeader().GetStoreId(), newLeader.GetStoreId(), []uint64{}, operator.OpAdmin) + op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region, newLeader.GetStoreId(), []uint64{}, operator.OpAdmin) if err != nil { log.Debug("fail to create transfer leader operator", errs.ZapError(err)) return err @@ -1157,7 +1157,7 @@ func (h *Handler) AccelerateRegionsScheduleInRanges(startKeys [][]byte, endKeys } // AdjustLimit adjusts the limit of regions to schedule. -func (h *Handler) AdjustLimit(limitStr string, defaultLimits ...int) (int, error) { +func (*Handler) AdjustLimit(limitStr string, defaultLimits ...int) (int, error) { limit := defaultRegionLimit if len(defaultLimits) > 0 { limit = defaultLimits[0] @@ -1181,7 +1181,7 @@ type ScatterRegionsResponse struct { } // BuildScatterRegionsResp builds ScatterRegionsResponse. -func (h *Handler) BuildScatterRegionsResp(opsCount int, failures map[uint64]error) *ScatterRegionsResponse { +func (*Handler) BuildScatterRegionsResp(opsCount int, failures map[uint64]error) *ScatterRegionsResponse { // If there existed any operator failed to be added into Operator Controller, add its regions into unProcessedRegions percentage := 100 if len(failures) > 0 { @@ -1217,7 +1217,7 @@ func (h *Handler) ScatterRegionsByRange(rawStartKey, rawEndKey string, group str } // ScatterRegionsByID scatters regions by id. 
-func (h *Handler) ScatterRegionsByID(ids []uint64, group string, retryLimit int, skipStoreLimit bool) (int, map[uint64]error, error) { +func (h *Handler) ScatterRegionsByID(ids []uint64, group string, retryLimit int) (int, map[uint64]error, error) { co := h.GetCoordinator() if co == nil { return 0, nil, errs.ErrNotBootstrapped.GenWithStackByArgs() } diff --git a/pkg/schedule/labeler/labeler_test.go b/pkg/schedule/labeler/labeler_test.go index 364f79b7a14..bd51bab7d83 100644 --- a/pkg/schedule/labeler/labeler_test.go +++ b/pkg/schedule/labeler/labeler_test.go @@ -404,7 +404,7 @@ func TestLabelerRuleTTL(t *testing.T) { func checkRuleInMemoryAndStorage(re *require.Assertions, labeler *RegionLabeler, ruleID string, exist bool) { re.Equal(exist, labeler.labelRules[ruleID] != nil) existInStorage := false - labeler.storage.LoadRegionRules(func(k, v string) { + labeler.storage.LoadRegionRules(func(k, _ string) { if k == ruleID { existInStorage = true } diff --git a/pkg/schedule/operator/create_operator.go b/pkg/schedule/operator/create_operator.go index 1c96128ab32..638230e3097 100644 --- a/pkg/schedule/operator/create_operator.go +++ b/pkg/schedule/operator/create_operator.go @@ -78,7 +78,7 @@ func CreateRemovePeerOperator(desc string, ci sche.SharedCluster, kind OpKind, r } // CreateTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store. -func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { +func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck). SetLeader(targetStoreID). SetLeaders(targetStoreIDs). @@ -86,7 +86,7 @@ func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *co } // CreateForceTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store forcibly. -func CreateForceTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, kind OpKind) (*Operator, error) { +func CreateForceTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, targetStoreID uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck, SkipPlacementRulesCheck). SetLeader(targetStoreID). EnableForceTargetLeader().
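Both constructors above drop their sourceStoreID parameter: the builder can read the current leader from the region itself, so call sites stop threading region.GetLeader().GetStoreId() through. A before/after sketch of one call site, taken from the handler change earlier in this diff (a fragment, not a standalone program):

// Before: the current leader's store was passed explicitly.
op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region,
	region.GetLeader().GetStoreId(), // redundant: derivable from region
	newLeader.GetStoreId(), []uint64{}, operator.OpAdmin)

// After: the source is implied by the region's current leader.
op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region,
	newLeader.GetStoreId(), []uint64{}, operator.OpAdmin)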
diff --git a/pkg/schedule/operator/create_operator_test.go b/pkg/schedule/operator/create_operator_test.go index 80c6cac4a04..d481334bbcb 100644 --- a/pkg/schedule/operator/create_operator_test.go +++ b/pkg/schedule/operator/create_operator_test.go @@ -423,7 +423,7 @@ func (suite *createOperatorTestSuite) TestCreateTransferLeaderOperator() { } for _, testCase := range testCases { region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: testCase.originPeers}, testCase.originPeers[0]) - op, err := CreateTransferLeaderOperator("test", suite.cluster, region, testCase.originPeers[0].StoreId, testCase.targetLeaderStoreID, []uint64{}, 0) + op, err := CreateTransferLeaderOperator("test", suite.cluster, region, testCase.targetLeaderStoreID, []uint64{}, 0) if testCase.isErr { re.Error(err) diff --git a/pkg/schedule/operator/operator_controller.go b/pkg/schedule/operator/operator_controller.go index f05c232904f..86e51fe70d6 100644 --- a/pkg/schedule/operator/operator_controller.go +++ b/pkg/schedule/operator/operator_controller.go @@ -222,7 +222,7 @@ func (oc *Controller) checkStaleOperator(op *Operator, step OpStep, region *core return false } -func (oc *Controller) getNextPushOperatorTime(step OpStep, now time.Time) time.Time { +func getNextPushOperatorTime(step OpStep, now time.Time) time.Time { nextTime := slowNotifyInterval switch step.(type) { case TransferLeader, PromoteLearner, ChangePeerV2Enter, ChangePeerV2Leave: @@ -270,7 +270,7 @@ func (oc *Controller) pollNeedDispatchRegion() (r *core.RegionInfo, next bool) { } // pushes with new notify time. - item.time = oc.getNextPushOperatorTime(step, now) + item.time = getNextPushOperatorTime(step, now) oc.opNotifierQueue.Push(item) return r, true } @@ -324,14 +324,16 @@ func (oc *Controller) AddWaitingOperator(ops ...*Operator) int { } continue } - oc.wop.PutOperator(op) + if isMerge { // count two merge operators as one, so wopStatus.ops[desc] should // not be updated here // TODO: call checkAddOperator ... 
+ oc.wop.PutMergeOperators([]*Operator{op, ops[i+1]}) i++ added++ - oc.wop.PutOperator(ops[i]) + } else { + oc.wop.PutOperator(op) } operatorCounter.WithLabelValues(desc, "put").Inc() oc.wopStatus.incCount(desc) @@ -559,7 +561,7 @@ func (oc *Controller) addOperatorInner(op *Operator) bool { } } - oc.opNotifierQueue.Push(&operatorWithTime{op: op, time: oc.getNextPushOperatorTime(step, time.Now())}) + oc.opNotifierQueue.Push(&operatorWithTime{op: op, time: getNextPushOperatorTime(step, time.Now())}) operatorCounter.WithLabelValues(op.Desc(), "create").Inc() for _, counter := range op.Counters { counter.Inc() diff --git a/pkg/schedule/operator/operator_controller_test.go b/pkg/schedule/operator/operator_controller_test.go index f2f2b7305ce..d3c50667fe0 100644 --- a/pkg/schedule/operator/operator_controller_test.go +++ b/pkg/schedule/operator/operator_controller_test.go @@ -108,7 +108,7 @@ func (suite *operatorControllerTestSuite) TestGetOpInfluence() { re.True(op2.Start()) oc.SetOperator(op2) go func(ctx context.Context) { - suite.checkRemoveOperatorSuccess(re, oc, op1) + checkRemoveOperatorSuccess(re, oc, op1) for { select { case <-ctx.Done(): @@ -550,7 +550,7 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { for i := uint64(1); i <= 5; i++ { op := NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: i}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } op := NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: 1}) re.False(oc.AddOperator(op)) @@ -560,13 +560,13 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { for i := uint64(1); i <= 10; i++ { op = NewTestOperator(i, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: i}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } tc.SetAllStoresLimit(storelimit.AddPeer, 60) for i := uint64(1); i <= 5; i++ { op = NewTestOperator(i, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: i}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } op = NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: 1}) re.False(oc.AddOperator(op)) @@ -576,7 +576,7 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { for i := uint64(1); i <= 5; i++ { op := NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } op = NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.False(oc.AddOperator(op)) @@ -586,13 +586,13 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { for i := uint64(1); i <= 10; i++ { op = NewTestOperator(i, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } tc.SetAllStoresLimit(storelimit.RemovePeer, 60) for i := uint64(1); i <= 5; i++ { op = NewTestOperator(i, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } op = NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.False(oc.AddOperator(op)) @@ -860,7 +860,7 @@ func newRegionInfo(id uint64, startKey, endKey string, size, 
keys int64, leader ) } -func (suite *operatorControllerTestSuite) checkRemoveOperatorSuccess(re *require.Assertions, oc *Controller, op *Operator) { +func checkRemoveOperatorSuccess(re *require.Assertions, oc *Controller, op *Operator) { re.True(oc.RemoveOperator(op)) re.True(op.IsEnd()) re.Equal(op, oc.GetOperatorStatus(op.RegionID()).Operator) diff --git a/pkg/schedule/operator/operator_test.go b/pkg/schedule/operator/operator_test.go index 4719df9408b..693f5c17475 100644 --- a/pkg/schedule/operator/operator_test.go +++ b/pkg/schedule/operator/operator_test.go @@ -65,7 +65,7 @@ func (suite *operatorTestSuite) TearDownTest() { suite.cancel() } -func (suite *operatorTestSuite) newTestRegion(regionID uint64, leaderPeer uint64, peers ...[2]uint64) *core.RegionInfo { +func newTestRegion(regionID uint64, leaderPeer uint64, peers ...[2]uint64) *core.RegionInfo { var ( region metapb.Region leader *metapb.Peer @@ -87,7 +87,7 @@ func (suite *operatorTestSuite) newTestRegion(regionID uint64, leaderPeer uint64 func (suite *operatorTestSuite) TestOperatorStep() { re := suite.Require() - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) re.False(TransferLeader{FromStore: 1, ToStore: 2}.IsFinish(region)) re.True(TransferLeader{FromStore: 2, ToStore: 1}.IsFinish(region)) re.False(AddPeer{ToStore: 3, PeerID: 3}.IsFinish(region)) @@ -96,11 +96,7 @@ func (suite *operatorTestSuite) TestOperatorStep() { re.True(RemovePeer{FromStore: 3}.IsFinish(region)) } -func (suite *operatorTestSuite) newTestOperator(regionID uint64, kind OpKind, steps ...OpStep) *Operator { - return NewTestOperator(regionID, &metapb.RegionEpoch{}, kind, steps...) -} - -func (suite *operatorTestSuite) checkSteps(re *require.Assertions, op *Operator, steps []OpStep) { +func checkSteps(re *require.Assertions, op *Operator, steps []OpStep) { re.Len(steps, op.Len()) for i := range steps { re.Equal(steps[i], op.Step(i)) @@ -109,16 +105,16 @@ func (suite *operatorTestSuite) checkSteps(re *require.Assertions, op *Operator, func (suite *operatorTestSuite) TestOperator() { re := suite.Require() - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) // addPeer1, transferLeader1, removePeer3 steps := []OpStep{ AddPeer{ToStore: 1, PeerID: 1}, TransferLeader{FromStore: 3, ToStore: 1}, RemovePeer{FromStore: 3}, } - op := suite.newTestOperator(1, OpAdmin|OpLeader|OpRegion, steps...) + op := NewTestOperator(1, &metapb.RegionEpoch{}, OpAdmin|OpLeader|OpRegion, steps...) re.Equal(constant.Urgent, op.GetPriorityLevel()) - suite.checkSteps(re, op, steps) + checkSteps(re, op, steps) op.Start() re.Nil(op.Check(region)) @@ -132,9 +128,9 @@ func (suite *operatorTestSuite) TestOperator() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op = suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op = NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) re.Equal(constant.Medium, op.GetPriorityLevel()) - suite.checkSteps(re, op, steps) + checkSteps(re, op, steps) op.Start() re.Equal(RemovePeer{FromStore: 2}, op.Check(region)) re.Equal(int32(2), atomic.LoadInt32(&op.currentStep)) @@ -149,7 +145,7 @@ func (suite *operatorTestSuite) TestOperator() { // check short timeout for transfer leader only operators. steps = []OpStep{TransferLeader{FromStore: 2, ToStore: 1}} - op = suite.newTestOperator(1, OpLeader, steps...) 
+ op = NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader, steps...) op.Start() re.False(op.CheckTimeout()) op.SetStatusReachTime(STARTED, op.GetStartTime().Add(-FastStepWaitTime-time.Second)) @@ -166,7 +162,7 @@ func (suite *operatorTestSuite) TestOperator() { func (suite *operatorTestSuite) TestInfluence() { re := suite.Require() - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) opInfluence := OpInfluence{StoresInfluence: make(map[uint64]*StoreInfluence)} storeOpInfluence := opInfluence.StoresInfluence storeOpInfluence[1] = &StoreInfluence{} @@ -309,7 +305,7 @@ func (suite *operatorTestSuite) TestCheckSuccess() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) re.Equal(CREATED, op.Status()) re.False(op.CheckSuccess()) re.True(op.Start()) @@ -324,7 +320,7 @@ func (suite *operatorTestSuite) TestCheckSuccess() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) op.currentStep = int32(len(op.steps)) re.Equal(CREATED, op.Status()) re.False(op.CheckSuccess()) @@ -342,7 +338,7 @@ func (suite *operatorTestSuite) TestCheckTimeout() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) re.Equal(CREATED, op.Status()) re.True(op.Start()) op.currentStep = int32(len(op.steps)) @@ -355,7 +351,7 @@ func (suite *operatorTestSuite) TestCheckTimeout() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) re.Equal(CREATED, op.Status()) re.True(op.Start()) op.currentStep = int32(len(op.steps)) @@ -372,7 +368,7 @@ func (suite *operatorTestSuite) TestStart() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) re.Equal(0, op.GetStartTime().Nanosecond()) re.Equal(CREATED, op.Status()) re.True(op.Start()) @@ -387,7 +383,7 @@ func (suite *operatorTestSuite) TestCheckExpired() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) re.False(op.CheckExpired()) re.Equal(CREATED, op.Status()) op.SetStatusReachTime(CREATED, time.Now().Add(-OperatorExpireTime)) @@ -398,30 +394,30 @@ func (suite *operatorTestSuite) TestCheckExpired() { func (suite *operatorTestSuite) TestCheck() { re := suite.Require() { - region := suite.newTestRegion(2, 2, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(2, 2, [2]uint64{1, 1}, [2]uint64{2, 2}) steps := []OpStep{ AddPeer{ToStore: 1, PeerID: 1}, TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(2, OpLeader|OpRegion, steps...) + op := NewTestOperator(2, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) 
re.True(op.Start()) re.NotNil(op.Check(region)) re.Equal(STARTED, op.Status()) - region = suite.newTestRegion(1, 1, [2]uint64{1, 1}) + region = newTestRegion(1, 1, [2]uint64{1, 1}) re.Nil(op.Check(region)) re.Equal(SUCCESS, op.Status()) } { - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) steps := []OpStep{ AddPeer{ToStore: 1, PeerID: 1}, TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) re.True(op.Start()) re.NotNil(op.Check(region)) re.Equal(STARTED, op.Status()) @@ -430,18 +426,18 @@ func (suite *operatorTestSuite) TestCheck() { re.Equal(TIMEOUT, op.Status()) } { - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) steps := []OpStep{ AddPeer{ToStore: 1, PeerID: 1}, TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) re.True(op.Start()) re.NotNil(op.Check(region)) re.Equal(STARTED, op.Status()) op.status.setTime(STARTED, time.Now().Add(-SlowStepWaitTime)) - region = suite.newTestRegion(1, 1, [2]uint64{1, 1}) + region = newTestRegion(1, 1, [2]uint64{1, 1}) re.Nil(op.Check(region)) re.Equal(SUCCESS, op.Status()) } @@ -454,28 +450,28 @@ func (suite *operatorTestSuite) TestSchedulerKind() { expect OpKind }{ { - op: suite.newTestOperator(1, OpAdmin|OpMerge|OpRegion), + op: NewTestOperator(1, &metapb.RegionEpoch{}, OpAdmin|OpMerge|OpRegion), expect: OpAdmin, }, { - op: suite.newTestOperator(1, OpMerge|OpLeader|OpRegion), + op: NewTestOperator(1, &metapb.RegionEpoch{}, OpMerge|OpLeader|OpRegion), expect: OpMerge, }, { - op: suite.newTestOperator(1, OpReplica|OpRegion), + op: NewTestOperator(1, &metapb.RegionEpoch{}, OpReplica|OpRegion), expect: OpReplica, }, { - op: suite.newTestOperator(1, OpSplit|OpRegion), + op: NewTestOperator(1, &metapb.RegionEpoch{}, OpSplit|OpRegion), expect: OpSplit, }, { - op: suite.newTestOperator(1, OpRange|OpRegion), + op: NewTestOperator(1, &metapb.RegionEpoch{}, OpRange|OpRegion), expect: OpRange, }, { - op: suite.newTestOperator(1, OpHotRegion|OpLeader|OpRegion), + op: NewTestOperator(1, &metapb.RegionEpoch{}, OpHotRegion|OpLeader|OpRegion), expect: OpHotRegion, }, { - op: suite.newTestOperator(1, OpRegion|OpLeader), + op: NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion|OpLeader), expect: OpRegion, }, { - op: suite.newTestOperator(1, OpLeader), + op: NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader), expect: OpLeader, }, } @@ -534,7 +530,7 @@ func (suite *operatorTestSuite) TestOpStepTimeout() { func (suite *operatorTestSuite) TestRecord() { re := suite.Require() - operator := suite.newTestOperator(1, OpLeader, AddLearner{ToStore: 1, PeerID: 1}, RemovePeer{FromStore: 1, PeerID: 1}) + operator := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader, AddLearner{ToStore: 1, PeerID: 1}, RemovePeer{FromStore: 1, PeerID: 1}) now := time.Now() time.Sleep(time.Second) ob := operator.Record(now) @@ -548,7 +544,7 @@ func (suite *operatorTestSuite) TestToJSONObject() { TransferLeader{FromStore: 3, ToStore: 1}, RemovePeer{FromStore: 3}, } - op := suite.newTestOperator(101, OpLeader|OpRegion, steps...) + op := NewTestOperator(101, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...) 
op.Start() obj := op.ToJSONObject() suite.Equal("test", obj.Desc) @@ -559,7 +555,7 @@ func (suite *operatorTestSuite) TestToJSONObject() { suite.Equal(STARTED, obj.Status) // Test SUCCESS status. - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) suite.Nil(op.Check(region)) suite.Equal(SUCCESS, op.Status()) obj = op.ToJSONObject() @@ -567,7 +563,7 @@ func (suite *operatorTestSuite) TestToJSONObject() { // Test TIMEOUT status. steps = []OpStep{TransferLeader{FromStore: 2, ToStore: 1}} - op = suite.newTestOperator(1, OpLeader, steps...) + op = NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader, steps...) op.Start() op.SetStatusReachTime(STARTED, op.GetStartTime().Add(-FastStepWaitTime-time.Second)) suite.True(op.CheckTimeout()) diff --git a/pkg/schedule/operator/status_tracker.go b/pkg/schedule/operator/status_tracker.go index e103a74ccb3..0ba8135750c 100644 --- a/pkg/schedule/operator/status_tracker.go +++ b/pkg/schedule/operator/status_tracker.go @@ -64,9 +64,8 @@ func (trk *OpStatusTracker) getTime(s OpStatus) time.Time { return trk.reachTimes[s] } else if trk.current == s { return trk.reachTimes[firstEndStatus] - } else { - return time.Time{} } + return time.Time{} } // To transfer the current status to dst if this transition is valid, diff --git a/pkg/schedule/operator/step.go b/pkg/schedule/operator/step.go index 6f14cbb326b..04e41028865 100644 --- a/pkg/schedule/operator/step.go +++ b/pkg/schedule/operator/step.go @@ -70,7 +70,7 @@ type TransferLeader struct { } // ConfVerChanged returns the delta value for version increased by this step. -func (tl TransferLeader) ConfVerChanged(_ *core.RegionInfo) uint64 { +func (TransferLeader) ConfVerChanged(_ *core.RegionInfo) uint64 { return 0 // transfer leader never change the conf version } @@ -122,12 +122,12 @@ func (tl TransferLeader) Influence(opInfluence OpInfluence, region *core.RegionI } // Timeout returns duration that current step may take. -func (tl TransferLeader) Timeout(regionSize int64) time.Duration { +func (TransferLeader) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } // GetCmd returns the schedule command for heartbeat response. -func (tl TransferLeader) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (tl TransferLeader) GetCmd(region *core.RegionInfo, _ bool) *hbstream.Operation { peers := make([]*metapb.Peer, 0, len(tl.ToStores)) for _, storeID := range tl.ToStores { peers = append(peers, region.GetStorePeer(storeID)) @@ -206,7 +206,7 @@ func (ap AddPeer) CheckInProgress(ci *core.BasicCluster, config config.SharedCon } // Timeout returns duration that current step may take. -func (ap AddPeer) Timeout(regionSize int64) time.Duration { +func (AddPeer) Timeout(regionSize int64) time.Duration { return slowStepWaitDuration(regionSize) } @@ -270,7 +270,7 @@ func (bw BecomeWitness) Influence(opInfluence OpInfluence, region *core.RegionIn } // Timeout returns duration that current step may take. -func (bw BecomeWitness) Timeout(regionSize int64) time.Duration { +func (BecomeWitness) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } @@ -338,12 +338,12 @@ func (bn BecomeNonWitness) Influence(opInfluence OpInfluence, region *core.Regio } // Timeout returns duration that current step may take. 
-func (bn BecomeNonWitness) Timeout(regionSize int64) time.Duration { +func (BecomeNonWitness) Timeout(regionSize int64) time.Duration { return slowStepWaitDuration(regionSize) } // GetCmd returns the schedule command for heartbeat response. -func (bn BecomeNonWitness) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (bn BecomeNonWitness) GetCmd(*core.RegionInfo, bool) *hbstream.Operation { return switchWitness(bn.PeerID, false) } @@ -518,7 +518,7 @@ func (al AddLearner) Influence(opInfluence OpInfluence, region *core.RegionInfo) } // Timeout returns duration that current step may take. -func (al AddLearner) Timeout(regionSize int64) time.Duration { +func (AddLearner) Timeout(regionSize int64) time.Duration { return slowStepWaitDuration(regionSize) } @@ -565,7 +565,7 @@ func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (pl PromoteLearner) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, region *core.RegionInfo) error { +func (pl PromoteLearner) CheckInProgress(_ *core.BasicCluster, _ config.SharedConfigProvider, region *core.RegionInfo) error { peer := region.GetStorePeer(pl.ToStore) if peer.GetId() != pl.PeerID { return errors.New("peer does not exist") @@ -574,10 +574,10 @@ func (pl PromoteLearner) CheckInProgress(_ *core.BasicCluster, config config.Sha } // Influence calculates the store difference that current step makes. -func (pl PromoteLearner) Influence(_ OpInfluence, _ *core.RegionInfo) {} +func (PromoteLearner) Influence(OpInfluence, *core.RegionInfo) {} // Timeout returns duration that current step may take. -func (pl PromoteLearner) Timeout(regionSize int64) time.Duration { +func (PromoteLearner) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } @@ -617,7 +617,7 @@ func (rp RemovePeer) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (rp RemovePeer) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, region *core.RegionInfo) error { +func (rp RemovePeer) CheckInProgress(_ *core.BasicCluster, _ config.SharedConfigProvider, region *core.RegionInfo) error { if rp.FromStore == region.GetLeader().GetStoreId() { return errors.New("cannot remove leader peer") } @@ -648,7 +648,7 @@ func (rp RemovePeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) } // Timeout returns duration that current step may take. -func (rp RemovePeer) Timeout(regionSize int64) time.Duration { +func (RemovePeer) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } @@ -674,7 +674,7 @@ type MergeRegion struct { } // ConfVerChanged returns the delta value for version increased by this step. -func (mr MergeRegion) ConfVerChanged(_ *core.RegionInfo) uint64 { +func (MergeRegion) ConfVerChanged(*core.RegionInfo) uint64 { return 0 } @@ -691,7 +691,7 @@ func (mr MergeRegion) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (mr MergeRegion) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, _ *core.RegionInfo) error { +func (MergeRegion) CheckInProgress(*core.BasicCluster, config.SharedConfigProvider, *core.RegionInfo) error { return nil } @@ -710,12 +710,12 @@ func (mr MergeRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo // Timeout returns duration that current step may take. 
// The merge step needs more time to finish, but less than a slow step. -func (mr MergeRegion) Timeout(regionSize int64) time.Duration { +func (MergeRegion) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) * 10 } // GetCmd returns the schedule command for heartbeat response. -func (mr MergeRegion) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (mr MergeRegion) GetCmd(*core.RegionInfo, bool) *hbstream.Operation { if mr.IsPassive { return nil } @@ -734,7 +734,7 @@ type SplitRegion struct { } // ConfVerChanged returns the delta value for version increased by this step. -func (sr SplitRegion) ConfVerChanged(_ *core.RegionInfo) uint64 { +func (SplitRegion) ConfVerChanged(*core.RegionInfo) uint64 { return 0 } @@ -748,7 +748,7 @@ func (sr SplitRegion) IsFinish(region *core.RegionInfo) bool { } // Influence calculates the store difference that current step makes. -func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) { +func (SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) { for _, peer := range region.GetPeers() { inf := opInfluence.GetStoreInfluence(peer.GetStoreId()) inf.RegionCount++ @@ -759,17 +759,17 @@ func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo } // CheckInProgress checks if the step is in the progress of advancing. -func (sr SplitRegion) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, _ *core.RegionInfo) error { +func (SplitRegion) CheckInProgress(*core.BasicCluster, config.SharedConfigProvider, *core.RegionInfo) error { return nil } // Timeout returns duration that current step may take. -func (sr SplitRegion) Timeout(regionSize int64) time.Duration { +func (SplitRegion) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } // GetCmd returns the schedule command for heartbeat response. -func (sr SplitRegion) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (sr SplitRegion) GetCmd(*core.RegionInfo, bool) *hbstream.Operation { return &hbstream.Operation{ SplitRegion: &pdpb.SplitRegion{ Policy: sr.Policy, @@ -814,7 +814,7 @@ func (dv DemoteVoter) IsFinish(region *core.RegionInfo) bool { } // Timeout returns duration that current step may take. -func (dv DemoteVoter) Timeout(regionSize int64) time.Duration { +func (DemoteVoter) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } @@ -884,7 +884,7 @@ func (cpe ChangePeerV2Enter) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (cpe ChangePeerV2Enter) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, region *core.RegionInfo) error { +func (cpe ChangePeerV2Enter) CheckInProgress(_ *core.BasicCluster, _ config.SharedConfigProvider, region *core.RegionInfo) error { inJointState, notInJointState := false, false for _, pl := range cpe.PromoteLearners { peer := region.GetStorePeer(pl.ToStore) @@ -932,7 +932,7 @@ func (cpe ChangePeerV2Enter) CheckInProgress(_ *core.BasicCluster, config config } // Influence calculates the store difference that current step makes. -func (cpe ChangePeerV2Enter) Influence(_ OpInfluence, _ *core.RegionInfo) {} +func (ChangePeerV2Enter) Influence(OpInfluence, *core.RegionInfo) {} // Timeout returns duration that current step may take.
func (cpe ChangePeerV2Enter) Timeout(regionSize int64) time.Duration { @@ -1013,7 +1013,7 @@ func (cpl ChangePeerV2Leave) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (cpl ChangePeerV2Leave) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, region *core.RegionInfo) error { +func (cpl ChangePeerV2Leave) CheckInProgress(_ *core.BasicCluster, _ config.SharedConfigProvider, region *core.RegionInfo) error { inJointState, notInJointState, demoteLeader := false, false, false leaderStoreID := region.GetLeader().GetStoreId() @@ -1072,7 +1072,7 @@ func (cpl ChangePeerV2Leave) CheckInProgress(_ *core.BasicCluster, config config } // Influence calculates the store difference that current step makes. -func (cpl ChangePeerV2Leave) Influence(_ OpInfluence, _ *core.RegionInfo) {} +func (ChangePeerV2Leave) Influence(OpInfluence, *core.RegionInfo) {} // Timeout returns duration that current step may take. func (cpl ChangePeerV2Leave) Timeout(regionSize int64) time.Duration { @@ -1081,7 +1081,7 @@ func (cpl ChangePeerV2Leave) Timeout(regionSize int64) time.Duration { } // GetCmd returns the schedule command for heartbeat response. -func (cpl ChangePeerV2Leave) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (ChangePeerV2Leave) GetCmd(_ *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { if !useConfChangeV2 { // only supported in ChangePeerV2 return nil diff --git a/pkg/schedule/operator/waiting_operator.go b/pkg/schedule/operator/waiting_operator.go index b3b1b885663..f75dcf25cd8 100644 --- a/pkg/schedule/operator/waiting_operator.go +++ b/pkg/schedule/operator/waiting_operator.go @@ -26,6 +26,7 @@ var priorityWeight = []float64{1.0, 4.0, 9.0, 16.0} // WaitingOperator is an interface of waiting operators. type WaitingOperator interface { PutOperator(op *Operator) + PutMergeOperators(ops []*Operator) GetOperator() []*Operator ListOperator() []*Operator } @@ -66,6 +67,21 @@ func (b *randBuckets) PutOperator(op *Operator) { bucket.ops = append(bucket.ops, op) } +// PutMergeOperators puts two operators into the random buckets. +func (b *randBuckets) PutMergeOperators(ops []*Operator) { + b.mu.Lock() + defer b.mu.Unlock() + if len(ops) != 2 || ops[0].Kind()&OpMerge == 0 || ops[1].Kind()&OpMerge == 0 { + return + } + priority := ops[0].GetPriorityLevel() + bucket := b.buckets[priority] + if len(bucket.ops) == 0 { + b.totalWeight += bucket.weight + } + bucket.ops = append(bucket.ops, ops...) +} + // ListOperator lists all operators in the random buckets.
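The guard in PutMergeOperators is meant to accept only a pair of OpMerge operators; joining the checks with `||` (rather than `&&`) also means short-circuit evaluation never indexes ops[1] when fewer than two operators are passed. A self-contained sketch of the same guard shape with toy types (not the pd ones):

package main

import "fmt"

type opKind uint32

const opMerge opKind = 1 << 0

type op struct{ kind opKind }

// validMergePair mirrors the PutMergeOperators guard in positive form: accept
// exactly two operators, both carrying the merge flag. && short-circuits left
// to right, so the slice is never indexed unless its length is 2.
func validMergePair(ops []*op) bool {
	return len(ops) == 2 && ops[0].kind&opMerge != 0 && ops[1].kind&opMerge != 0
}

func main() {
	fmt.Println(validMergePair([]*op{{opMerge}, {opMerge}})) // true
	fmt.Println(validMergePair([]*op{{opMerge}}))            // false, no panic
}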
func (b *randBuckets) ListOperator() []*Operator { b.mu.Lock() diff --git a/pkg/schedule/placement/region_rule_cache_test.go b/pkg/schedule/placement/region_rule_cache_test.go index 835203bed26..e951ea10cc5 100644 --- a/pkg/schedule/placement/region_rule_cache_test.go +++ b/pkg/schedule/placement/region_rule_cache_test.go @@ -226,7 +226,7 @@ func (manager *RegionRuleFitCacheManager) mockRegionRuleFitCache(region *core.Re } } -// nolint +// nolint:unparam func mockStores(num int) []*core.StoreInfo { stores := make([]*core.StoreInfo, 0, num) now := time.Now() @@ -237,7 +237,6 @@ func mockStores(num int) []*core.StoreInfo { return stores } -// nolint func mockStoresNoHeartbeat(num int) []*core.StoreInfo { stores := make([]*core.StoreInfo, 0, num) for i := 1; i <= num; i++ { diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index e11e8492765..b4e6feb332c 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -164,7 +164,7 @@ func (handler *balanceLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http handler.rd.JSON(w, httpCode, v) } -func (handler *balanceLeaderHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *balanceLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -219,7 +219,7 @@ func (l *balanceLeaderScheduler) GetName() string { return l.name } -func (l *balanceLeaderScheduler) GetType() string { +func (*balanceLeaderScheduler) GetType() string { return BalanceLeaderType } @@ -553,7 +553,7 @@ func (l *balanceLeaderScheduler) createOperator(solver *solver, collector *plan. } solver.Step++ defer func() { solver.Step-- }() - op, err := operator.CreateTransferLeaderOperator(BalanceLeaderType, solver, solver.Region, solver.Region.GetLeader().GetStoreId(), solver.TargetStoreID(), []uint64{}, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator(BalanceLeaderType, solver, solver.Region, solver.TargetStoreID(), []uint64{}, operator.OpLeader) if err != nil { log.Debug("fail to create balance leader operator", errs.ZapError(err)) if collector != nil { diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 1cef3a4615b..98e3be6e08a 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -96,7 +96,7 @@ func (s *balanceRegionScheduler) GetName() string { return s.conf.Name } -func (s *balanceRegionScheduler) GetType() string { +func (*balanceRegionScheduler) GetType() string { return BalanceRegionType } diff --git a/pkg/schedule/schedulers/balance_test.go b/pkg/schedule/schedulers/balance_test.go index 68332d7067e..234acfd6d26 100644 --- a/pkg/schedule/schedulers/balance_test.go +++ b/pkg/schedule/schedulers/balance_test.go @@ -186,7 +186,7 @@ func TestTolerantRatio(t *testing.T) { kind constant.ScheduleKind expectTolerantResource func(constant.ScheduleKind) int64 }{ - {0, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.ByCount}, func(k constant.ScheduleKind) int64 { + {0, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.ByCount}, func(constant.ScheduleKind) int64 { return int64(leaderTolerantSizeRatio) }}, {0, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.BySize}, func(k constant.ScheduleKind) int64 { @@ -198,7 +198,7 @@ func TestTolerantRatio(t *testing.T) { {0, constant.ScheduleKind{Resource: 
constant.RegionKind, Policy: constant.BySize}, func(k constant.ScheduleKind) int64 { return int64(adjustTolerantRatio(tc, k) * float64(regionSize)) }}, - {10, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.ByCount}, func(k constant.ScheduleKind) int64 { + {10, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.ByCount}, func(constant.ScheduleKind) int64 { return int64(tc.GetScheduleConfig().TolerantSizeRatio) }}, {10, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.BySize}, func(k constant.ScheduleKind) int64 { diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index aee112c9dc1..3c4776c4666 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -150,7 +150,7 @@ func (handler *balanceWitnessHandler) UpdateConfig(w http.ResponseWriter, r *htt handler.rd.JSON(w, httpCode, v) } -func (handler *balanceWitnessHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *balanceWitnessHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -214,7 +214,7 @@ func (b *balanceWitnessScheduler) GetName() string { return b.name } -func (b *balanceWitnessScheduler) GetType() string { +func (*balanceWitnessScheduler) GetType() string { return BalanceWitnessType } diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index f4c8c577767..f3772757ad3 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -68,32 +68,32 @@ func NewBaseScheduler(opController *operator.Controller) *BaseScheduler { return &BaseScheduler{OpController: opController} } -func (s *BaseScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (*BaseScheduler) ServeHTTP(w http.ResponseWriter, _ *http.Request) { fmt.Fprintf(w, "not implements") } // GetMinInterval returns the minimal interval for the scheduler -func (s *BaseScheduler) GetMinInterval() time.Duration { +func (*BaseScheduler) GetMinInterval() time.Duration { return MinScheduleInterval } // EncodeConfig encodes the config for the scheduler -func (s *BaseScheduler) EncodeConfig() ([]byte, error) { +func (*BaseScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(nil) } // ReloadConfig reloads the config from the storage. // By default, the scheduler does not need to reload the config // if it doesn't support the dynamic configuration. -func (s *BaseScheduler) ReloadConfig() error { return nil } +func (*BaseScheduler) ReloadConfig() error { return nil } // GetNextInterval returns the next interval for the scheduler -func (s *BaseScheduler) GetNextInterval(interval time.Duration) time.Duration { +func (*BaseScheduler) GetNextInterval(interval time.Duration) time.Duration { return intervalGrow(interval, MaxScheduleInterval, exponentialGrowth) } // PrepareConfig does some preparation work for the config. -func (s *BaseScheduler) PrepareConfig(cluster sche.SchedulerCluster) error { return nil } +func (*BaseScheduler) PrepareConfig(sche.SchedulerCluster) error { return nil } // CleanConfig does some cleanup work for the config.
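The BaseScheduler methods above are deliberate no-op defaults: concrete schedulers embed *BaseScheduler and override only the hooks they care about, which is why the unused receivers could be dropped wholesale. A minimal self-contained sketch of that embedding pattern with toy types (not the pd ones):

package main

import (
	"fmt"
	"time"
)

// base supplies no-op defaults, in the spirit of BaseScheduler.
type base struct{}

func (*base) GetMinInterval() time.Duration { return time.Second }
func (*base) ReloadConfig() error           { return nil }

// custom embeds base and overrides a single default.
type custom struct{ base }

func (*custom) GetMinInterval() time.Duration { return 10 * time.Millisecond }

func main() {
	c := &custom{}
	fmt.Println(c.GetMinInterval()) // 10ms: the override wins
	fmt.Println(c.ReloadConfig())   // <nil>: inherited default
}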
-func (s *BaseScheduler) CleanConfig(cluster sche.SchedulerCluster) {} +func (*BaseScheduler) CleanConfig(sche.SchedulerCluster) {} diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 5cd59583767..3750834a82d 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -118,7 +118,7 @@ func (conf *evictLeaderSchedulerConfig) Persist() error { return conf.storage.SaveSchedulerConfig(name, data) } -func (conf *evictLeaderSchedulerConfig) getSchedulerName() string { +func (*evictLeaderSchedulerConfig) getSchedulerName() string { return EvictLeaderName } @@ -190,11 +190,11 @@ func (s *evictLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) s.handler.ServeHTTP(w, r) } -func (s *evictLeaderScheduler) GetName() string { +func (*evictLeaderScheduler) GetName() string { return EvictLeaderName } -func (s *evictLeaderScheduler) GetType() string { +func (*evictLeaderScheduler) GetType() string { return EvictLeaderType } @@ -251,7 +251,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } -func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { evictLeaderCounter.Inc() return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize), nil } @@ -338,7 +338,7 @@ func scheduleEvictLeaderOnce(name, typ string, cluster sche.SchedulerCluster, co for _, t := range targets { targetIDs = append(targetIDs, t.GetID()) } - op, err := operator.CreateTransferLeaderOperator(typ, cluster, region, region.GetLeader().GetStoreId(), target.GetID(), targetIDs, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator(typ, cluster, region, target.GetID(), targetIDs, operator.OpLeader) if err != nil { log.Debug("fail to create evict leader operator", errs.ZapError(err)) continue @@ -395,7 +395,7 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.") } -func (handler *evictLeaderHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index aa48d0bc9e9..ab30b256823 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -177,7 +177,7 @@ func (handler *evictSlowStoreHandler) UpdateConfig(w http.ResponseWriter, r *htt handler.rd.JSON(w, http.StatusOK, "Config updated.") } -func (handler *evictSlowStoreHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictSlowStoreHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -192,11 +192,11 @@ func (s *evictSlowStoreScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reque s.handler.ServeHTTP(w, r) } -func (s *evictSlowStoreScheduler) GetName() string { +func (*evictSlowStoreScheduler) GetName() string { return EvictSlowStoreName } -func (s *evictSlowStoreScheduler) GetType() string { +func (*evictSlowStoreScheduler) GetType() string { return EvictSlowStoreType } @@ -280,7 
+280,7 @@ func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste return true } -func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { evictSlowStoreCounter.Inc() var ops []*operator.Operator diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index d919c1c0f0a..da3dbc24e95 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -263,7 +263,7 @@ func (handler *evictSlowTrendHandler) UpdateConfig(w http.ResponseWriter, r *htt handler.rd.JSON(w, http.StatusOK, "Config updated.") } -func (handler *evictSlowTrendHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictSlowTrendHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -274,7 +274,7 @@ type evictSlowTrendScheduler struct { handler http.Handler } -func (s *evictSlowTrendScheduler) GetNextInterval(interval time.Duration) time.Duration { +func (s *evictSlowTrendScheduler) GetNextInterval(time.Duration) time.Duration { var growthType intervalGrowthType // If it already found a slow node as candidate, the next interval should be shorter // to make the next scheduling as soon as possible. This adjustment will decrease the @@ -291,11 +291,11 @@ func (s *evictSlowTrendScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reque s.handler.ServeHTTP(w, r) } -func (s *evictSlowTrendScheduler) GetName() string { +func (*evictSlowTrendScheduler) GetName() string { return EvictSlowTrendName } -func (s *evictSlowTrendScheduler) GetType() string { +func (*evictSlowTrendScheduler) GetType() string { return EvictSlowTrendType } @@ -384,7 +384,7 @@ func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste return allowed } -func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { schedulerCounter.WithLabelValues(s.GetName(), "schedule").Inc() var ops []*operator.Operator diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index 262dfe73873..56ed7cd730e 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -108,7 +108,7 @@ func (conf *grantHotRegionSchedulerConfig) Persist() error { return conf.storage.SaveSchedulerConfig(name, data) } -func (conf *grantHotRegionSchedulerConfig) getSchedulerName() string { +func (*grantHotRegionSchedulerConfig) getSchedulerName() string { return GrantHotRegionName } @@ -148,11 +148,11 @@ func newGrantHotRegionScheduler(opController *operator.Controller, conf *grantHo return ret } -func (s *grantHotRegionScheduler) GetName() string { +func (*grantHotRegionScheduler) GetName() string { return GrantHotRegionName } -func (s *grantHotRegionScheduler) GetType() string { +func (*grantHotRegionScheduler) GetType() string { return GrantHotRegionType } @@ -256,7 +256,7 @@ func newGrantHotRegionHandler(config *grantHotRegionSchedulerConfig) http.Handle return router } -func (s *grantHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func 
(s *grantHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { grantHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) @@ -352,7 +352,7 @@ func (s *grantHotRegionScheduler) transfer(cluster sche.SchedulerCluster, region dstStore := &metapb.Peer{StoreId: destStoreIDs[i]} if isLeader { - op, err = operator.CreateTransferLeaderOperator(GrantHotRegionType+"-leader", cluster, srcRegion, srcRegion.GetLeader().GetStoreId(), dstStore.StoreId, []uint64{}, operator.OpLeader) + op, err = operator.CreateTransferLeaderOperator(GrantHotRegionType+"-leader", cluster, srcRegion, dstStore.StoreId, []uint64{}, operator.OpLeader) } else { op, err = operator.CreateMovePeerOperator(GrantHotRegionType+"-move", cluster, srcRegion, operator.OpRegion|operator.OpLeader, srcStore.GetID(), dstStore) } diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 8d36a5ae1c3..5de898489d9 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -98,7 +98,7 @@ func (conf *grantLeaderSchedulerConfig) Persist() error { return conf.storage.SaveSchedulerConfig(name, data) } -func (conf *grantLeaderSchedulerConfig) getSchedulerName() string { +func (*grantLeaderSchedulerConfig) getSchedulerName() string { return GrantLeaderName } @@ -176,11 +176,11 @@ func (s *grantLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) s.handler.ServeHTTP(w, r) } -func (s *grantLeaderScheduler) GetName() string { +func (*grantLeaderScheduler) GetName() string { return GrantLeaderName } -func (s *grantLeaderScheduler) GetType() string { +func (*grantLeaderScheduler) GetType() string { return GrantLeaderType } @@ -235,7 +235,7 @@ func (s *grantLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } -func (s *grantLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *grantLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { grantLeaderCounter.Inc() storeIDWithRanges := s.conf.getStoreIDWithRanges() ops := make([]*operator.Operator, 0, len(storeIDWithRanges)) @@ -248,7 +248,7 @@ func (s *grantLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bo continue } - op, err := operator.CreateForceTransferLeaderOperator(GrantLeaderType, cluster, region, region.GetLeader().GetStoreId(), id, operator.OpLeader) + op, err := operator.CreateForceTransferLeaderOperator(GrantLeaderType, cluster, region, id, operator.OpLeader) if err != nil { log.Debug("fail to create grant leader operator", errs.ZapError(err)) continue @@ -306,7 +306,7 @@ func (handler *grantLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.") } -func (handler *grantLeaderHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *grantLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index 2a38ef399c8..b6293c2dac9 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -254,7 +254,7 @@ func (h *hotScheduler) GetName() string { return h.name } -func (h *hotScheduler) GetType() string { +func (*hotScheduler) GetType() string { return 
HotRegionType } @@ -306,11 +306,11 @@ func (h *hotScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.conf.ServeHTTP(w, r) } -func (h *hotScheduler) GetMinInterval() time.Duration { +func (*hotScheduler) GetMinInterval() time.Duration { return minHotScheduleInterval } -func (h *hotScheduler) GetNextInterval(interval time.Duration) time.Duration { +func (h *hotScheduler) GetNextInterval(time.Duration) time.Duration { return intervalGrow(h.GetMinInterval(), maxHotScheduleInterval, exponentialGrowth) } @@ -322,7 +322,7 @@ func (h *hotScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { return allowed } -func (h *hotScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (h *hotScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { hotSchedulerCounter.Inc() rw := h.randomRWType() return h.dispatch(rw, cluster), nil @@ -1193,7 +1193,7 @@ func (bs *balanceSolver) checkHistoryByPriorityAndToleranceAnyOf(loads [][]float }) } -func (bs *balanceSolver) checkByPriorityAndToleranceFirstOnly(loads []float64, f func(int) bool) bool { +func (bs *balanceSolver) checkByPriorityAndToleranceFirstOnly(_ []float64, f func(int) bool) bool { return f(bs.firstPriority) } @@ -1732,7 +1732,6 @@ func (bs *balanceSolver) createReadOperator(region *core.RegionInfo, srcStoreID, "transfer-hot-read-leader", bs, region, - srcStoreID, dstStoreID, []uint64{}, operator.OpHotRegion) @@ -1769,7 +1768,6 @@ func (bs *balanceSolver) createWriteOperator(region *core.RegionInfo, srcStoreID "transfer-hot-write-leader", bs, region, - srcStoreID, dstStoreID, []uint64{}, operator.OpHotRegion) diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go index b336438830b..80d20ca65bb 100644 --- a/pkg/schedule/schedulers/hot_region_config.go +++ b/pkg/schedule/schedulers/hot_region_config.go @@ -375,7 +375,7 @@ func (conf *hotRegionSchedulerConfig) ServeHTTP(w http.ResponseWriter, r *http.R router.ServeHTTP(w, r) } -func (conf *hotRegionSchedulerConfig) handleGetConfig(w http.ResponseWriter, r *http.Request) { +func (conf *hotRegionSchedulerConfig) handleGetConfig(w http.ResponseWriter, _ *http.Request) { conf.RLock() defer conf.RUnlock() rd := render.New(render.Options{IndentJSON: true}) diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 8b1893887db..5f6cca892ee 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -43,11 +43,11 @@ func init() { // TODO: remove this global variable in the future. // And use a function to create hot scheduler for test.
schedulePeerPr = 1.0 - RegisterScheduler(utils.Write.String(), func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(utils.Write.String(), func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { cfg := initHotRegionScheduleConfig() return newHotWriteScheduler(opController, cfg), nil }) - RegisterScheduler(utils.Read.String(), func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(utils.Read.String(), func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { return newHotReadScheduler(opController, initHotRegionScheduleConfig()), nil }) } @@ -138,7 +138,7 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) { case movePeer: op, err = operator.CreateMovePeerOperator("move-peer-test", tc, region, operator.OpAdmin, 2, &metapb.Peer{Id: region.GetID()*10000 + 1, StoreId: 4}) case transferLeader: - op, err = operator.CreateTransferLeaderOperator("transfer-leader-test", tc, region, 1, 2, []uint64{}, operator.OpAdmin) + op, err = operator.CreateTransferLeaderOperator("transfer-leader-test", tc, region, 2, []uint64{}, operator.OpAdmin) } re.NoError(err) re.NotNil(op) diff --git a/pkg/schedule/schedulers/hot_region_v2.go b/pkg/schedule/schedulers/hot_region_v2.go index 40cb35cd16b..50016231cad 100644 --- a/pkg/schedule/schedulers/hot_region_v2.go +++ b/pkg/schedule/schedulers/hot_region_v2.go @@ -457,13 +457,13 @@ func (bs *balanceSolver) betterThanV2(old *solution) bool { if bs.cur.mainPeerStat != old.mainPeerStat { // We will firstly consider ensuring converge faster, secondly reduce oscillation if bs.resourceTy == writeLeader { - return bs.getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, + return getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, bs.cur.getPeersRateFromCache(bs.firstPriority), old.getPeersRateFromCache(bs.firstPriority)) > 0 } - firstCmp := bs.getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, + firstCmp := getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, bs.cur.getPeersRateFromCache(bs.firstPriority), old.getPeersRateFromCache(bs.firstPriority)) - secondCmp := bs.getRkCmpByPriorityV2(bs.secondPriority, bs.cur.secondScore, old.secondScore, + secondCmp := getRkCmpByPriorityV2(bs.secondPriority, bs.cur.secondScore, old.secondScore, bs.cur.getPeersRateFromCache(bs.secondPriority), old.getPeersRateFromCache(bs.secondPriority)) switch bs.cur.progressiveRank { case -4, -3, -2: // firstPriority @@ -482,7 +482,7 @@ func (bs *balanceSolver) betterThanV2(old *solution) bool { return false } -func (bs *balanceSolver) getRkCmpByPriorityV2(dim int, curScore, oldScore int, curPeersRate, oldPeersRate float64) int { +func getRkCmpByPriorityV2(dim int, curScore, oldScore int, curPeersRate, oldPeersRate float64) int { switch { case curScore > oldScore: return 1 diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go index e22037703cc..6bca686404d 100644 --- a/pkg/schedule/schedulers/init.go +++ b/pkg/schedule/schedulers/init.go @@ -52,7 +52,7 @@ func schedulersRegister() { } }) - RegisterScheduler(BalanceLeaderType, func(opController *operator.Controller, storage 
endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(BalanceLeaderType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceLeaderSchedulerConfig{storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -80,7 +80,7 @@ func schedulersRegister() { } }) - RegisterScheduler(BalanceRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(BalanceRegionType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceRegionSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -105,7 +105,7 @@ func schedulersRegister() { } }) - RegisterScheduler(BalanceWitnessType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(BalanceWitnessType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceWitnessSchedulerConfig{storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -152,13 +152,13 @@ func schedulersRegister() { }) // evict slow store - RegisterSliceDecoderBuilder(EvictSlowStoreType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(EvictSlowStoreType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(EvictSlowStoreType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(EvictSlowStoreType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initEvictSlowStoreSchedulerConfig(storage) if err := decoder(conf); err != nil { return nil, err @@ -198,7 +198,7 @@ func schedulersRegister() { } }) - RegisterScheduler(GrantHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(GrantHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &grantHotRegionSchedulerConfig{StoreIDs: make([]uint64, 0), storage: storage} conf.cluster = opController.GetCluster() if err := decoder(conf); err != nil { @@ -208,13 +208,13 @@ func schedulersRegister() { }) // hot region - RegisterSliceDecoderBuilder(HotRegionType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(HotRegionType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(HotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(HotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initHotRegionScheduleConfig() var 
data map[string]any if err := decoder(&data); err != nil { @@ -286,7 +286,7 @@ func schedulersRegister() { } }) - RegisterScheduler(LabelType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(LabelType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &labelSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -311,7 +311,7 @@ func schedulersRegister() { } }) - RegisterScheduler(RandomMergeType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(RandomMergeType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &randomMergeSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -340,7 +340,7 @@ func schedulersRegister() { } }) - RegisterScheduler(ScatterRangeType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(ScatterRangeType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &scatterRangeSchedulerConfig{ storage: storage, } @@ -374,7 +374,7 @@ func schedulersRegister() { } }) - RegisterScheduler(ShuffleHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(ShuffleHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &shuffleHotRegionSchedulerConfig{Limit: uint64(1)} if err := decoder(conf); err != nil { return nil, err @@ -400,7 +400,7 @@ func schedulersRegister() { } }) - RegisterScheduler(ShuffleLeaderType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(ShuffleLeaderType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &shuffleLeaderSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -425,7 +425,7 @@ func schedulersRegister() { } }) - RegisterScheduler(ShuffleRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(ShuffleRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &shuffleRegionSchedulerConfig{storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -434,13 +434,13 @@ func schedulersRegister() { }) // split bucket - RegisterSliceDecoderBuilder(SplitBucketType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(SplitBucketType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(SplitBucketType, func(opController *operator.Controller, storage 
endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(SplitBucketType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initSplitBucketConfig() if err := decoder(conf); err != nil { return nil, err @@ -450,24 +450,24 @@ func schedulersRegister() { }) // transfer witness leader - RegisterSliceDecoderBuilder(TransferWitnessLeaderType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(TransferWitnessLeaderType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(TransferWitnessLeaderType, func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(TransferWitnessLeaderType, func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { return newTransferWitnessLeaderScheduler(opController), nil }) // evict slow store by trend - RegisterSliceDecoderBuilder(EvictSlowTrendType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(EvictSlowTrendType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(EvictSlowTrendType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(EvictSlowTrendType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initEvictSlowTrendSchedulerConfig(storage) if err := decoder(conf); err != nil { return nil, err diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index 90310bcf10e..24875e3e26a 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -68,7 +68,7 @@ func (s *labelScheduler) GetName() string { return s.conf.Name } -func (s *labelScheduler) GetType() string { +func (*labelScheduler) GetType() string { return LabelType } @@ -84,7 +84,7 @@ func (s *labelScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { return allowed } -func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { labelCounter.Inc() stores := cluster.GetStores() rejectLeaderStores := make(map[uint64]struct{}) @@ -119,7 +119,7 @@ func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([ continue } - op, err := operator.CreateTransferLeaderOperator("label-reject-leader", cluster, region, id, target.GetID(), []uint64{}, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator("label-reject-leader", cluster, region, target.GetID(), []uint64{}, operator.OpLeader) if err != nil { log.Debug("fail to create transfer label reject leader operator", errs.ZapError(err)) return nil, nil diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index 44bb5081ef9..7fec0bd9530 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -70,7 +70,7 @@ func (s *randomMergeScheduler) GetName() string { return s.conf.Name } -func (s 
*randomMergeScheduler) GetType() string { +func (*randomMergeScheduler) GetType() string { return RandomMergeType } @@ -86,7 +86,7 @@ func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } -func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { randomMergeCounter.Inc() store := filter.NewCandidates(cluster.GetStores()). @@ -113,7 +113,7 @@ func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bo return nil, nil } - if !s.allowMerge(cluster, region, target) { + if !allowMerge(cluster, region, target) { randomMergeNotAllowedCounter.Inc() return nil, nil } @@ -129,7 +129,7 @@ func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bo return ops, nil } -func (s *randomMergeScheduler) allowMerge(cluster sche.SchedulerCluster, region, target *core.RegionInfo) bool { +func allowMerge(cluster sche.SchedulerCluster, region, target *core.RegionInfo) bool { if !filter.IsRegionHealthy(region) || !filter.IsRegionHealthy(target) { return false } diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index 9ad9e597dfd..8a2f0a5398b 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -156,7 +156,7 @@ func (l *scatterRangeScheduler) GetName() string { return l.name } -func (l *scatterRangeScheduler) GetType() string { +func (*scatterRangeScheduler) GetType() string { return ScatterRangeType } @@ -206,7 +206,7 @@ func (l *scatterRangeScheduler) allowBalanceRegion(cluster sche.SchedulerCluster return allowed } -func (l *scatterRangeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (l *scatterRangeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { scatterRangeCounter.Inc() // isolate a new cluster according to the key range c := genRangeCluster(cluster, l.config.GetStartKey(), l.config.GetEndKey()) @@ -282,7 +282,7 @@ func (handler *scatterRangeHandler) UpdateConfig(w http.ResponseWriter, r *http. 
handler.rd.JSON(w, http.StatusOK, nil) } -func (handler *scatterRangeHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *scatterRangeHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go index b74b72283ec..abace59a266 100644 --- a/pkg/schedule/schedulers/scheduler.go +++ b/pkg/schedule/schedulers/scheduler.go @@ -101,7 +101,7 @@ func ConfigJSONDecoder(data []byte) ConfigDecoder { func ConfigSliceDecoder(name string, args []string) ConfigDecoder { builder, ok := schedulerArgsToDecoder[name] if !ok { - return func(v any) error { + return func(any) error { return errors.Errorf("the config decoder do not register for %s", name) } } diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index a1448fbd041..726138e8f7a 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -114,7 +114,7 @@ func (s *shuffleHotRegionScheduler) GetName() string { return s.conf.Name } -func (s *shuffleHotRegionScheduler) GetType() string { +func (*shuffleHotRegionScheduler) GetType() string { return ShuffleHotRegionType } @@ -157,7 +157,7 @@ func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerClus return hotRegionAllowed && regionAllowed && leaderAllowed } -func (s *shuffleHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { shuffleHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) @@ -250,7 +250,7 @@ func (handler *shuffleHotRegionHandler) UpdateConfig(w http.ResponseWriter, r *h handler.rd.JSON(w, http.StatusOK, nil) } -func (handler *shuffleHotRegionHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *shuffleHotRegionHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index a6ff4baf65b..5b3dfd9fd20 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -71,7 +71,7 @@ func (s *shuffleLeaderScheduler) GetName() string { return s.conf.Name } -func (s *shuffleLeaderScheduler) GetType() string { +func (*shuffleLeaderScheduler) GetType() string { return ShuffleLeaderType } @@ -87,7 +87,7 @@ func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster return allowed } -func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { // We shuffle leaders between stores by: // 1. random select a valid store. // 2. transfer a leader to the store. 
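One recurring change in these scheduler files is worth a note before the next hunk: besides blanking unused parameters, the diff drops the explicit source-store argument from operator.CreateTransferLeaderOperator and operator.CreateForceTransferLeaderOperator (see the grant-leader, label, and hot-region hunks above, and the shuffle-leader hunk just below). Every deleted argument was some spelling of region.GetLeader().GetStoreId(), so the operator builder can presumably recover the source store from the region itself. A minimal hypothetical sketch of that idea — the package, function, and return shape are illustrative, not PD's actual signatures:

package sketch

import "github.com/tikv/pd/pkg/core"

// transferLeaderEndpoints shows why call sites can stop passing a source
// store ID: the region already records its current leader, which is exactly
// the expression the old call sites passed in by hand.
func transferLeaderEndpoints(region *core.RegionInfo, targetStoreID uint64) (src, dst uint64) {
	src = region.GetLeader().GetStoreId() // derived instead of supplied by the caller
	return src, targetStoreID
}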
@@ -106,7 +106,7 @@ func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun shuffleLeaderNoFollowerCounter.Inc() return nil, nil } - op, err := operator.CreateTransferLeaderOperator(ShuffleLeaderType, cluster, region, region.GetLeader().GetId(), targetStore.GetID(), []uint64{}, operator.OpAdmin) + op, err := operator.CreateTransferLeaderOperator(ShuffleLeaderType, cluster, region, targetStore.GetID(), []uint64{}, operator.OpAdmin) if err != nil { log.Debug("fail to create shuffle leader operator", errs.ZapError(err)) return nil, nil diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index f9bed18d3fa..b1a100384ae 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -68,11 +68,11 @@ func (s *shuffleRegionScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reques s.conf.ServeHTTP(w, r) } -func (s *shuffleRegionScheduler) GetName() string { +func (*shuffleRegionScheduler) GetName() string { return ShuffleRegionName } -func (s *shuffleRegionScheduler) GetType() string { +func (*shuffleRegionScheduler) GetType() string { return ShuffleRegionType } @@ -107,7 +107,7 @@ func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster return allowed } -func (s *shuffleRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleRegionScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { shuffleRegionCounter.Inc() region, oldPeer := s.scheduleRemovePeer(cluster) if region == nil { diff --git a/pkg/schedule/schedulers/shuffle_region_config.go b/pkg/schedule/schedulers/shuffle_region_config.go index 552d7ea8bce..bce64f743b8 100644 --- a/pkg/schedule/schedulers/shuffle_region_config.go +++ b/pkg/schedule/schedulers/shuffle_region_config.go @@ -77,7 +77,7 @@ func (conf *shuffleRegionSchedulerConfig) ServeHTTP(w http.ResponseWriter, r *ht router.ServeHTTP(w, r) } -func (conf *shuffleRegionSchedulerConfig) handleGetRoles(w http.ResponseWriter, r *http.Request) { +func (conf *shuffleRegionSchedulerConfig) handleGetRoles(w http.ResponseWriter, _ *http.Request) { rd := render.New(render.Options{IndentJSON: true}) rd.JSON(w, http.StatusOK, conf.GetRoles()) } diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 7e276402e49..609510446c7 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -175,12 +175,12 @@ func newSplitBucketScheduler(opController *operator.Controller, conf *splitBucke } // GetName returns the name of the split bucket scheduler. -func (s *splitBucketScheduler) GetName() string { +func (*splitBucketScheduler) GetName() string { return SplitBucketName } // GetType returns the type of the split bucket scheduler. -func (s *splitBucketScheduler) GetType() string { +func (*splitBucketScheduler) GetType() string { return SplitBucketType } @@ -230,7 +230,7 @@ type splitBucketPlan struct { } // Schedule return operators if some bucket is too hot. 
-func (s *splitBucketScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *splitBucketScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { splitBucketScheduleCounter.Inc() conf := s.conf.Clone() plan := &splitBucketPlan{ diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index c651a8ef872..9ba78985d13 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -60,19 +60,19 @@ func newTransferWitnessLeaderScheduler(opController *operator.Controller) Schedu } } -func (s *transferWitnessLeaderScheduler) GetName() string { +func (*transferWitnessLeaderScheduler) GetName() string { return TransferWitnessLeaderName } -func (s *transferWitnessLeaderScheduler) GetType() string { +func (*transferWitnessLeaderScheduler) GetType() string { return TransferWitnessLeaderType } -func (s *transferWitnessLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { +func (*transferWitnessLeaderScheduler) IsScheduleAllowed(sche.SchedulerCluster) bool { return true } -func (s *transferWitnessLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *transferWitnessLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { transferWitnessLeaderCounter.Inc() return s.scheduleTransferWitnessLeaderBatch(s.GetName(), s.GetType(), cluster, transferWitnessLeaderBatchSize), nil } @@ -83,7 +83,7 @@ batchLoop: for i := 0; i < batchSize; i++ { select { case region := <-s.regions: - op, err := s.scheduleTransferWitnessLeader(name, typ, cluster, region) + op, err := scheduleTransferWitnessLeader(name, typ, cluster, region) if err != nil { log.Debug("fail to create transfer leader operator", errs.ZapError(err)) continue @@ -100,7 +100,7 @@ batchLoop: return ops } -func (s *transferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerCluster, region *core.RegionInfo) (*operator.Operator, error) { +func scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerCluster, region *core.RegionInfo) (*operator.Operator, error) { var filters []filter.Filter unhealthyPeerStores := make(map[uint64]struct{}) for _, peer := range region.GetDownPeers() { @@ -123,7 +123,7 @@ func (s *transferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ for _, t := range targets { targetIDs = append(targetIDs, t.GetID()) } - return operator.CreateTransferLeaderOperator(typ, cluster, region, region.GetLeader().GetStoreId(), target.GetID(), targetIDs, operator.OpWitnessLeader) + return operator.CreateTransferLeaderOperator(typ, cluster, region, target.GetID(), targetIDs, operator.OpWitnessLeader) } // RecvRegionInfo receives a checked region from coordinator diff --git a/pkg/schedule/splitter/region_splitter.go b/pkg/schedule/splitter/region_splitter.go index f0da8442a2c..aeab4b70cf0 100644 --- a/pkg/schedule/splitter/region_splitter.go +++ b/pkg/schedule/splitter/region_splitter.go @@ -108,6 +108,7 @@ func (r *RegionSplitter) splitRegionsByKeys(parCtx context.Context, splitKeys [] ticker.Stop() cancel() }() +outerLoop: for { select { case <-ticker.C: @@ -118,7 +119,7 @@ func (r *RegionSplitter) splitRegionsByKeys(parCtx context.Context, splitKeys [] r.handler.ScanRegionsByKeyRange(groupKeys, results) } case <-ctx.Done(): - break + break 
outerLoop } finished := true for _, groupKeys := range validGroups { diff --git a/pkg/schedule/splitter/region_splitter_test.go b/pkg/schedule/splitter/region_splitter_test.go index ebb8b225a9b..99fd53df1e5 100644 --- a/pkg/schedule/splitter/region_splitter_test.go +++ b/pkg/schedule/splitter/region_splitter_test.go @@ -37,7 +37,7 @@ func newMockSplitRegionsHandler() *mockSplitRegionsHandler { } // SplitRegionByKeys mock SplitRegionsHandler -func (m *mockSplitRegionsHandler) SplitRegionByKeys(region *core.RegionInfo, splitKeys [][]byte) error { +func (m *mockSplitRegionsHandler) SplitRegionByKeys(region *core.RegionInfo, _ [][]byte) error { m.regions[region.GetID()] = [2][]byte{ region.GetStartKey(), region.GetEndKey(), @@ -76,7 +76,7 @@ func (suite *regionSplitterTestSuite) SetupSuite() { suite.ctx, suite.cancel = context.WithCancel(context.Background()) } -func (suite *regionSplitterTestSuite) TearDownTest() { +func (suite *regionSplitterTestSuite) TearDownSuite() { suite.cancel() } diff --git a/pkg/slice/slice_test.go b/pkg/slice/slice_test.go index 1fe3fe79dcf..019cd49c46a 100644 --- a/pkg/slice/slice_test.go +++ b/pkg/slice/slice_test.go @@ -22,7 +22,6 @@ import ( ) func TestSlice(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { a []int @@ -45,7 +44,6 @@ func TestSlice(t *testing.T) { } func TestSliceContains(t *testing.T) { - t.Parallel() re := require.New(t) ss := []string{"a", "b", "c"} re.True(slice.Contains(ss, "a")) @@ -61,7 +59,6 @@ func TestSliceContains(t *testing.T) { } func TestSliceRemoveGenericTypes(t *testing.T) { - t.Parallel() re := require.New(t) ss := []string{"a", "b", "c"} ss = slice.Remove(ss, "a") @@ -77,7 +74,6 @@ func TestSliceRemoveGenericTypes(t *testing.T) { } func TestSliceRemove(t *testing.T) { - t.Parallel() re := require.New(t) is := []int64{} diff --git a/pkg/statistics/buckets/hot_bucket_task.go b/pkg/statistics/buckets/hot_bucket_task.go index d6a43a6f8ae..ff7c30a7d81 100644 --- a/pkg/statistics/buckets/hot_bucket_task.go +++ b/pkg/statistics/buckets/hot_bucket_task.go @@ -55,7 +55,7 @@ func NewCheckPeerTask(buckets *metapb.Buckets) flowBucketsItemTask { } } -func (t *checkBucketsTask) taskType() flowItemTaskKind { +func (*checkBucketsTask) taskType() flowItemTaskKind { return checkBucketsTaskType } @@ -79,7 +79,7 @@ func NewCollectBucketStatsTask(minDegree int, regionIDs ...uint64) *collectBucke } } -func (t *collectBucketStatsTask) taskType() flowItemTaskKind { +func (*collectBucketStatsTask) taskType() flowItemTaskKind { return collectBucketStatsTaskType } diff --git a/pkg/statistics/collector.go b/pkg/statistics/collector.go index e64b673803d..88986b93d4b 100644 --- a/pkg/statistics/collector.go +++ b/pkg/statistics/collector.go @@ -36,11 +36,11 @@ func newTikvCollector() storeCollector { return tikvCollector{} } -func (c tikvCollector) Engine() string { +func (tikvCollector) Engine() string { return core.EngineTiKV } -func (c tikvCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { +func (tikvCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { if info.IsTiFlash() { return false } @@ -53,7 +53,7 @@ func (c tikvCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind return false } -func (c tikvCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { +func (tikvCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { loads = 
make([]float64, utils.DimLen) switch rwTy { case utils.Read: @@ -87,11 +87,11 @@ func newTiFlashCollector(isTraceRegionFlow bool) storeCollector { return tiflashCollector{isTraceRegionFlow: isTraceRegionFlow} } -func (c tiflashCollector) Engine() string { +func (tiflashCollector) Engine() string { return core.EngineTiFlash } -func (c tiflashCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { +func (tiflashCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { switch kind { case constant.LeaderKind: return false diff --git a/pkg/statistics/hot_cache_task.go b/pkg/statistics/hot_cache_task.go index c84a292b4e7..fa224b522ff 100644 --- a/pkg/statistics/hot_cache_task.go +++ b/pkg/statistics/hot_cache_task.go @@ -146,7 +146,7 @@ func newCollectMetricsTask() *collectMetricsTask { return &collectMetricsTask{} } -func (t *collectMetricsTask) runTask(cache *hotPeerCache) { +func (*collectMetricsTask) runTask(cache *hotPeerCache) { cache.collectMetrics() } diff --git a/pkg/statistics/hot_peer_cache.go b/pkg/statistics/hot_peer_cache.go index 0e35e0e23be..cd27dcad4c8 100644 --- a/pkg/statistics/hot_peer_cache.go +++ b/pkg/statistics/hot_peer_cache.go @@ -451,7 +451,7 @@ func (f *hotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldIt // For write stat, as the stat is send by region heartbeat, the first heartbeat will be skipped. // For read stat, as the stat is send by store heartbeat, the first heartbeat won't be skipped. if f.kind == utils.Write { - f.inheritItem(newItem, oldItem) + inheritItem(newItem, oldItem) return newItem } } else { @@ -465,25 +465,25 @@ func (f *hotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldIt isFull := newItem.rollingLoads[0].isFull(f.interval()) // The intervals of dims are the same, so it is only necessary to determine whether any of them if !isFull { // not update hot degree and anti count - f.inheritItem(newItem, oldItem) + inheritItem(newItem, oldItem) } else { // If item is inCold, it means the pd didn't recv this item in the store heartbeat, // thus we make it colder if newItem.inCold { - f.coldItem(newItem, oldItem) + coldItem(newItem, oldItem) } else { thresholds := f.calcHotThresholds(newItem.StoreID) if f.isOldColdPeer(oldItem, newItem.StoreID) { if newItem.isHot(thresholds) { - f.initItem(newItem) + initItem(newItem, f.kind.DefaultAntiCount()) } else { newItem.actionType = utils.Remove } } else { if newItem.isHot(thresholds) { - f.hotItem(newItem, oldItem) + hotItem(newItem, oldItem, f.kind.DefaultAntiCount()) } else { - f.coldItem(newItem, oldItem) + coldItem(newItem, oldItem) } } } @@ -496,7 +496,7 @@ func (f *hotPeerCache) updateNewHotPeerStat(newItem *HotPeerStat, deltaLoads []f regionStats := f.kind.RegionStats() // interval is not 0 which is guaranteed by the caller. 
if interval.Seconds() >= float64(f.kind.ReportInterval()) { - f.initItem(newItem) + initItem(newItem, f.kind.DefaultAntiCount()) } newItem.actionType = utils.Add newItem.rollingLoads = make([]*dimStat, len(regionStats)) @@ -556,7 +556,7 @@ func (f *hotPeerCache) removeAllItem() { } } -func (f *hotPeerCache) coldItem(newItem, oldItem *HotPeerStat) { +func coldItem(newItem, oldItem *HotPeerStat) { newItem.HotDegree = oldItem.HotDegree - 1 newItem.AntiCount = oldItem.AntiCount - 1 if newItem.AntiCount <= 0 { @@ -566,9 +566,9 @@ func (f *hotPeerCache) coldItem(newItem, oldItem *HotPeerStat) { } } -func (f *hotPeerCache) hotItem(newItem, oldItem *HotPeerStat) { +func hotItem(newItem, oldItem *HotPeerStat, defaultAntiCount int) { newItem.HotDegree = oldItem.HotDegree + 1 - if oldItem.AntiCount < f.kind.DefaultAntiCount() { + if oldItem.AntiCount < defaultAntiCount { newItem.AntiCount = oldItem.AntiCount + 1 } else { newItem.AntiCount = oldItem.AntiCount @@ -576,13 +576,13 @@ func (f *hotPeerCache) hotItem(newItem, oldItem *HotPeerStat) { newItem.allowInherited = true } -func (f *hotPeerCache) initItem(item *HotPeerStat) { +func initItem(item *HotPeerStat, defaultAntiCount int) { item.HotDegree = 1 - item.AntiCount = f.kind.DefaultAntiCount() + item.AntiCount = defaultAntiCount item.allowInherited = true } -func (f *hotPeerCache) inheritItem(newItem, oldItem *HotPeerStat) { +func inheritItem(newItem, oldItem *HotPeerStat) { newItem.HotDegree = oldItem.HotDegree newItem.AntiCount = oldItem.AntiCount } diff --git a/pkg/statistics/slow_stat.go b/pkg/statistics/slow_stat.go index 4079043d154..cc579b3d90b 100644 --- a/pkg/statistics/slow_stat.go +++ b/pkg/statistics/slow_stat.go @@ -15,8 +15,6 @@ package statistics import ( - "context" - "github.com/tikv/pd/pkg/utils/syncutil" ) @@ -26,7 +24,7 @@ type SlowStat struct { } // NewSlowStat creates the container to hold slow nodes' statistics. -func NewSlowStat(ctx context.Context) *SlowStat { +func NewSlowStat() *SlowStat { return &SlowStat{ SlowStoresStats: NewSlowStoresStats(), } diff --git a/pkg/statistics/store_collection.go b/pkg/statistics/store_collection.go index aacd45338d1..4f76ffb0b5f 100644 --- a/pkg/statistics/store_collection.go +++ b/pkg/statistics/store_collection.go @@ -147,7 +147,7 @@ func (s *storeStatistics) Observe(store *core.StoreInfo) { } } -func (s *storeStatistics) ObserveHotStat(store *core.StoreInfo, stats *StoresStats) { +func ObserveHotStat(store *core.StoreInfo, stats *StoresStats) { // Store flows. 
storeAddress := store.GetAddress() id := strconv.FormatUint(store.GetID(), 10) @@ -309,10 +309,6 @@ func (m *storeStatisticsMap) Observe(store *core.StoreInfo) { m.stats.Observe(store) } -func (m *storeStatisticsMap) ObserveHotStat(store *core.StoreInfo, stats *StoresStats) { - m.stats.ObserveHotStat(store, stats) -} - func (m *storeStatisticsMap) Collect() { m.stats.Collect() } diff --git a/pkg/statistics/store_collection_test.go b/pkg/statistics/store_collection_test.go index 02e6350ffa4..64a02a54bb4 100644 --- a/pkg/statistics/store_collection_test.go +++ b/pkg/statistics/store_collection_test.go @@ -68,7 +68,7 @@ func TestStoreStatistics(t *testing.T) { storeStats := NewStoreStatisticsMap(opt) for _, store := range stores { storeStats.Observe(store) - storeStats.ObserveHotStat(store, storesStats) + ObserveHotStat(store, storesStats) } stats := storeStats.stats diff --git a/pkg/storage/endpoint/keyspace.go b/pkg/storage/endpoint/keyspace.go index 77c81b2c8d6..30540e49a2e 100644 --- a/pkg/storage/endpoint/keyspace.go +++ b/pkg/storage/endpoint/keyspace.go @@ -48,7 +48,7 @@ type KeyspaceStorage interface { var _ KeyspaceStorage = (*StorageEndpoint)(nil) // SaveKeyspaceMeta adds a save keyspace meta operation to target transaction. -func (se *StorageEndpoint) SaveKeyspaceMeta(txn kv.Txn, meta *keyspacepb.KeyspaceMeta) error { +func (*StorageEndpoint) SaveKeyspaceMeta(txn kv.Txn, meta *keyspacepb.KeyspaceMeta) error { metaPath := KeyspaceMetaPath(meta.GetId()) metaVal, err := proto.Marshal(meta) if err != nil { @@ -59,7 +59,7 @@ func (se *StorageEndpoint) SaveKeyspaceMeta(txn kv.Txn, meta *keyspacepb.Keyspac // LoadKeyspaceMeta load and return keyspace meta specified by id. // If keyspace does not exist or error occurs, returned meta will be nil. -func (se *StorageEndpoint) LoadKeyspaceMeta(txn kv.Txn, id uint32) (*keyspacepb.KeyspaceMeta, error) { +func (*StorageEndpoint) LoadKeyspaceMeta(txn kv.Txn, id uint32) (*keyspacepb.KeyspaceMeta, error) { metaPath := KeyspaceMetaPath(id) metaVal, err := txn.Load(metaPath) if err != nil || metaVal == "" { @@ -74,7 +74,7 @@ func (se *StorageEndpoint) LoadKeyspaceMeta(txn kv.Txn, id uint32) (*keyspacepb. } // SaveKeyspaceID saves keyspace ID to the path specified by keyspace name. -func (se *StorageEndpoint) SaveKeyspaceID(txn kv.Txn, id uint32, name string) error { +func (*StorageEndpoint) SaveKeyspaceID(txn kv.Txn, id uint32, name string) error { idPath := KeyspaceIDPath(name) idVal := strconv.FormatUint(uint64(id), SpaceIDBase) return txn.Save(idPath, idVal) @@ -83,7 +83,7 @@ func (se *StorageEndpoint) SaveKeyspaceID(txn kv.Txn, id uint32, name string) er // LoadKeyspaceID loads keyspace ID from the path specified by keyspace name. // An additional boolean is returned to indicate whether target id exists, // it returns false if target id not found, or if error occurred. -func (se *StorageEndpoint) LoadKeyspaceID(txn kv.Txn, name string) (bool, uint32, error) { +func (*StorageEndpoint) LoadKeyspaceID(txn kv.Txn, name string) (bool, uint32, error) { idPath := KeyspaceIDPath(name) idVal, err := txn.Load(idPath) // Failed to load the keyspaceID if loading operation errored, or if keyspace does not exist. @@ -99,7 +99,7 @@ func (se *StorageEndpoint) LoadKeyspaceID(txn kv.Txn, name string) (bool, uint32 // LoadRangeKeyspace loads keyspaces starting at startID. // limit specifies the limit of loaded keyspaces. 
-func (se *StorageEndpoint) LoadRangeKeyspace(txn kv.Txn, startID uint32, limit int) ([]*keyspacepb.KeyspaceMeta, error) { +func (*StorageEndpoint) LoadRangeKeyspace(txn kv.Txn, startID uint32, limit int) ([]*keyspacepb.KeyspaceMeta, error) { startKey := KeyspaceMetaPath(startID) endKey := clientv3.GetPrefixRangeEnd(KeyspaceMetaPrefix()) keys, values, err := txn.LoadRange(startKey, endKey, limit) diff --git a/pkg/storage/endpoint/meta.go b/pkg/storage/endpoint/meta.go index d83e2b386c8..33482da512f 100644 --- a/pkg/storage/endpoint/meta.go +++ b/pkg/storage/endpoint/meta.go @@ -236,7 +236,7 @@ func (se *StorageEndpoint) DeleteRegion(region *metapb.Region) error { } // Flush flushes the pending data to the underlying storage backend. -func (se *StorageEndpoint) Flush() error { return nil } +func (*StorageEndpoint) Flush() error { return nil } // Close closes the underlying storage backend. -func (se *StorageEndpoint) Close() error { return nil } +func (*StorageEndpoint) Close() error { return nil } diff --git a/pkg/storage/endpoint/rule.go b/pkg/storage/endpoint/rule.go index d0092e8e303..84ad6ee1352 100644 --- a/pkg/storage/endpoint/rule.go +++ b/pkg/storage/endpoint/rule.go @@ -44,12 +44,12 @@ type RuleStorage interface { var _ RuleStorage = (*StorageEndpoint)(nil) // SaveRule stores a rule cfg to the rulesPath. -func (se *StorageEndpoint) SaveRule(txn kv.Txn, ruleKey string, rule any) error { +func (*StorageEndpoint) SaveRule(txn kv.Txn, ruleKey string, rule any) error { return saveJSONInTxn(txn, ruleKeyPath(ruleKey), rule) } // DeleteRule removes a rule from storage. -func (se *StorageEndpoint) DeleteRule(txn kv.Txn, ruleKey string) error { +func (*StorageEndpoint) DeleteRule(txn kv.Txn, ruleKey string) error { return txn.Remove(ruleKeyPath(ruleKey)) } @@ -59,12 +59,12 @@ func (se *StorageEndpoint) LoadRuleGroups(f func(k, v string)) error { } // SaveRuleGroup stores a rule group config to storage. -func (se *StorageEndpoint) SaveRuleGroup(txn kv.Txn, groupID string, group any) error { +func (*StorageEndpoint) SaveRuleGroup(txn kv.Txn, groupID string, group any) error { return saveJSONInTxn(txn, ruleGroupIDPath(groupID), group) } // DeleteRuleGroup removes a rule group from storage. -func (se *StorageEndpoint) DeleteRuleGroup(txn kv.Txn, groupID string) error { +func (*StorageEndpoint) DeleteRuleGroup(txn kv.Txn, groupID string) error { return txn.Remove(ruleGroupIDPath(groupID)) } @@ -74,12 +74,12 @@ func (se *StorageEndpoint) LoadRegionRules(f func(k, v string)) error { } // SaveRegionRule saves a region rule to the storage. -func (se *StorageEndpoint) SaveRegionRule(txn kv.Txn, ruleKey string, rule any) error { +func (*StorageEndpoint) SaveRegionRule(txn kv.Txn, ruleKey string, rule any) error { return saveJSONInTxn(txn, regionLabelKeyPath(ruleKey), rule) } // DeleteRegionRule removes a region rule from storage. -func (se *StorageEndpoint) DeleteRegionRule(txn kv.Txn, ruleKey string) error { +func (*StorageEndpoint) DeleteRegionRule(txn kv.Txn, ruleKey string) error { return txn.Remove(regionLabelKeyPath(ruleKey)) } diff --git a/pkg/storage/endpoint/tso_keyspace_group.go b/pkg/storage/endpoint/tso_keyspace_group.go index 39a08afe937..ba322336feb 100644 --- a/pkg/storage/endpoint/tso_keyspace_group.go +++ b/pkg/storage/endpoint/tso_keyspace_group.go @@ -163,7 +163,7 @@ type KeyspaceGroupStorage interface { var _ KeyspaceGroupStorage = (*StorageEndpoint)(nil) // LoadKeyspaceGroup loads the keyspace group by ID. 
-func (se *StorageEndpoint) LoadKeyspaceGroup(txn kv.Txn, id uint32) (*KeyspaceGroup, error) { +func (*StorageEndpoint) LoadKeyspaceGroup(txn kv.Txn, id uint32) (*KeyspaceGroup, error) { value, err := txn.Load(KeyspaceGroupIDPath(id)) if err != nil || value == "" { return nil, err @@ -176,12 +176,12 @@ func (se *StorageEndpoint) LoadKeyspaceGroup(txn kv.Txn, id uint32) (*KeyspaceGr } // SaveKeyspaceGroup saves the keyspace group. -func (se *StorageEndpoint) SaveKeyspaceGroup(txn kv.Txn, kg *KeyspaceGroup) error { +func (*StorageEndpoint) SaveKeyspaceGroup(txn kv.Txn, kg *KeyspaceGroup) error { return saveJSONInTxn(txn, KeyspaceGroupIDPath(kg.ID), kg) } // DeleteKeyspaceGroup deletes the keyspace group. -func (se *StorageEndpoint) DeleteKeyspaceGroup(txn kv.Txn, id uint32) error { +func (*StorageEndpoint) DeleteKeyspaceGroup(txn kv.Txn, id uint32) error { return txn.Remove(KeyspaceGroupIDPath(id)) } diff --git a/pkg/syncer/client_test.go b/pkg/syncer/client_test.go index 84193ebaffe..e7be77d2bb0 100644 --- a/pkg/syncer/client_test.go +++ b/pkg/syncer/client_test.go @@ -91,7 +91,7 @@ func (s *mockServer) LoopContext() context.Context { return s.ctx } -func (s *mockServer) ClusterID() uint64 { +func (*mockServer) ClusterID() uint64 { return 1 } @@ -107,7 +107,7 @@ func (s *mockServer) GetStorage() storage.Storage { return s.storage } -func (s *mockServer) Name() string { +func (*mockServer) Name() string { return "mock-server" } @@ -115,7 +115,7 @@ func (s *mockServer) GetRegions() []*core.RegionInfo { return s.bc.GetRegions() } -func (s *mockServer) GetTLSConfig() *grpcutil.TLSConfig { +func (*mockServer) GetTLSConfig() *grpcutil.TLSConfig { return &grpcutil.TLSConfig{} } diff --git a/pkg/tso/global_allocator.go b/pkg/tso/global_allocator.go index a37bcc73881..f90dc5f26fe 100644 --- a/pkg/tso/global_allocator.go +++ b/pkg/tso/global_allocator.go @@ -187,7 +187,7 @@ func (gta *GlobalTSOAllocator) Initialize(int) error { gta.tsoAllocatorRoleGauge.Set(1) // The suffix of a Global TSO should always be 0. gta.timestampOracle.suffix = 0 - return gta.timestampOracle.SyncTimestamp(gta.member.GetLeadership()) + return gta.timestampOracle.SyncTimestamp() } // IsInitialize is used to indicates whether this allocator is initialized. @@ -197,7 +197,7 @@ func (gta *GlobalTSOAllocator) IsInitialize() bool { // UpdateTSO is used to update the TSO in memory and the time window in etcd. func (gta *GlobalTSOAllocator) UpdateTSO() error { - return gta.timestampOracle.UpdateTimestamp(gta.member.GetLeadership()) + return gta.timestampOracle.UpdateTimestamp() } // SetTSO sets the physical part with given TSO. diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index d259ab27a5b..d1e94d445cc 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -674,7 +674,7 @@ func (kgm *KeyspaceGroupManager) isAssignedToMe(group *endpoint.KeyspaceGroup) b // updateKeyspaceGroup applies the given keyspace group. If the keyspace group is just assigned to // this host/pod, it will join the primary election. 
func (kgm *KeyspaceGroupManager) updateKeyspaceGroup(group *endpoint.KeyspaceGroup) { - if err := kgm.checkKeySpaceGroupID(group.ID); err != nil { + if err := checkKeySpaceGroupID(group.ID); err != nil { log.Warn("keyspace group ID is invalid, ignore it", zap.Error(err)) return } @@ -751,7 +751,7 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroup(group *endpoint.KeyspaceGro kgm.groupUpdateRetryList[group.ID] = group return } - participant.SetCampaignChecker(func(leadership *election.Leadership) bool { + participant.SetCampaignChecker(func(*election.Leadership) bool { return splitSourceAM.GetMember().IsLeader() }) } @@ -997,7 +997,7 @@ func (kgm *KeyspaceGroupManager) exitElectionMembership(group *endpoint.Keyspace // GetAllocatorManager returns the AllocatorManager of the given keyspace group func (kgm *KeyspaceGroupManager) GetAllocatorManager(keyspaceGroupID uint32) (*AllocatorManager, error) { - if err := kgm.checkKeySpaceGroupID(keyspaceGroupID); err != nil { + if err := checkKeySpaceGroupID(keyspaceGroupID); err != nil { return nil, err } if am, _ := kgm.getKeyspaceGroupMeta(keyspaceGroupID); am != nil { @@ -1022,7 +1022,7 @@ func (kgm *KeyspaceGroupManager) FindGroupByKeyspaceID( func (kgm *KeyspaceGroupManager) GetElectionMember( keyspaceID, keyspaceGroupID uint32, ) (ElectionMember, error) { - if err := kgm.checkKeySpaceGroupID(keyspaceGroupID); err != nil { + if err := checkKeySpaceGroupID(keyspaceGroupID); err != nil { return nil, err } am, _, _, err := kgm.getKeyspaceGroupMetaWithCheck(keyspaceID, keyspaceGroupID) @@ -1052,7 +1052,7 @@ func (kgm *KeyspaceGroupManager) HandleTSORequest( keyspaceID, keyspaceGroupID uint32, dcLocation string, count uint32, ) (ts pdpb.Timestamp, curKeyspaceGroupID uint32, err error) { - if err := kgm.checkKeySpaceGroupID(keyspaceGroupID); err != nil { + if err := checkKeySpaceGroupID(keyspaceGroupID); err != nil { return pdpb.Timestamp{}, keyspaceGroupID, err } am, _, curKeyspaceGroupID, err := kgm.getKeyspaceGroupMetaWithCheck(keyspaceID, keyspaceGroupID) @@ -1086,7 +1086,7 @@ func (kgm *KeyspaceGroupManager) HandleTSORequest( return ts, curKeyspaceGroupID, err } -func (kgm *KeyspaceGroupManager) checkKeySpaceGroupID(id uint32) error { +func checkKeySpaceGroupID(id uint32) error { if id < mcsutils.MaxKeyspaceGroupCountInUse { return nil } diff --git a/pkg/tso/local_allocator.go b/pkg/tso/local_allocator.go index 45c200ca566..e9019bf2bf3 100644 --- a/pkg/tso/local_allocator.go +++ b/pkg/tso/local_allocator.go @@ -101,7 +101,7 @@ func (lta *LocalTSOAllocator) GetDCLocation() string { func (lta *LocalTSOAllocator) Initialize(suffix int) error { lta.tsoAllocatorRoleGauge.Set(1) lta.timestampOracle.suffix = suffix - return lta.timestampOracle.SyncTimestamp(lta.leadership) + return lta.timestampOracle.SyncTimestamp() } // IsInitialize is used to indicates whether this allocator is initialized. @@ -112,7 +112,7 @@ func (lta *LocalTSOAllocator) IsInitialize() bool { // UpdateTSO is used to update the TSO in memory and the time window in etcd // for all local TSO allocators this PD server hold. func (lta *LocalTSOAllocator) UpdateTSO() error { - return lta.timestampOracle.UpdateTimestamp(lta.leadership) + return lta.timestampOracle.UpdateTimestamp() } // SetTSO sets the physical part with given TSO. diff --git a/pkg/tso/tso.go b/pkg/tso/tso.go index 5ad786678c4..bcb3169e73c 100644 --- a/pkg/tso/tso.go +++ b/pkg/tso/tso.go @@ -156,7 +156,7 @@ func (t *timestampOracle) GetTimestampPath() string { } // SyncTimestamp is used to synchronize the timestamp. 
-func (t *timestampOracle) SyncTimestamp(leadership *election.Leadership) error { +func (t *timestampOracle) SyncTimestamp() error { log.Info("start to sync timestamp", logutil.CondUint32("keyspace-group-id", t.keyspaceGroupID, t.keyspaceGroupID > 0)) t.metrics.syncEvent.Inc() @@ -311,7 +311,7 @@ func (t *timestampOracle) resetUserTimestampInner(leadership *election.Leadershi // // NOTICE: this function should be called after the TSO in memory has been initialized // and should not be called when the TSO in memory has been reset anymore. -func (t *timestampOracle) UpdateTimestamp(leadership *election.Leadership) error { +func (t *timestampOracle) UpdateTimestamp() error { if !t.isInitialized() { return errs.ErrUpdateTimestamp.FastGenByArgs("timestamp in memory has not been initialized") } diff --git a/pkg/utils/apiutil/apiutil.go b/pkg/utils/apiutil/apiutil.go index c762245321e..d0745ada271 100644 --- a/pkg/utils/apiutil/apiutil.go +++ b/pkg/utils/apiutil/apiutil.go @@ -441,16 +441,15 @@ func (p *customReverseProxies) ServeHTTP(w http.ResponseWriter, r *http.Request) log.Error("request failed", errs.ZapError(errs.ErrSendRequest, err)) continue } - defer resp.Body.Close() var reader io.ReadCloser switch resp.Header.Get("Content-Encoding") { case "gzip": reader, err = gzip.NewReader(resp.Body) if err != nil { log.Error("failed to parse response with gzip compress", zap.Error(err)) + resp.Body.Close() continue } - defer reader.Close() default: reader = resp.Body } @@ -474,6 +473,8 @@ func (p *customReverseProxies) ServeHTTP(w http.ResponseWriter, r *http.Request) break } } + resp.Body.Close() + reader.Close() if err != nil { log.Error("write failed", errs.ZapError(errs.ErrWriteHTTPBody, err), zap.String("target-address", url.String())) // try next url. 
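The apiutil.go hunk above fixes a real leak, not just a lint nit: the old code ran defer resp.Body.Close() and defer reader.Close() inside the retry loop, so the deferred calls only fired once ServeHTTP returned, and every failed upstream kept a response body open in the meantime. The diff instead closes both handles explicitly at the end of each iteration (and closes the body on the gzip error path) before trying the next URL. A standalone sketch of the same discipline — the package, helper name, and URL list are illustrative, not taken from the PD code:

package proxysketch

import (
	"io"
	"net/http"
)

// readFirstReachable tries each URL in order and returns the first body it
// can read in full. The body is closed inside the loop; deferring the Close
// here would postpone every call until the function returns, leaving one
// open body per failed attempt.
func readFirstReachable(urls []string) ([]byte, error) {
	var lastErr error
	for _, u := range urls {
		resp, err := http.Get(u)
		if err != nil {
			lastErr = err
			continue
		}
		body, err := io.ReadAll(resp.Body)
		resp.Body.Close() // close now, not via defer-in-a-loop
		if err != nil {
			lastErr = err
			continue
		}
		return body, nil
	}
	return nil, lastErr
}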
diff --git a/pkg/utils/apiutil/apiutil_test.go b/pkg/utils/apiutil/apiutil_test.go index 106d3fb21cb..aee21621dd2 100644 --- a/pkg/utils/apiutil/apiutil_test.go +++ b/pkg/utils/apiutil/apiutil_test.go @@ -26,7 +26,6 @@ import ( ) func TestJsonRespondErrorOk(t *testing.T) { - t.Parallel() re := require.New(t) rd := render.New(render.Options{ IndentJSON: true, @@ -45,7 +44,6 @@ func TestJsonRespondErrorOk(t *testing.T) { } func TestJsonRespondErrorBadInput(t *testing.T) { - t.Parallel() re := require.New(t) rd := render.New(render.Options{ IndentJSON: true, @@ -71,7 +69,6 @@ func TestJsonRespondErrorBadInput(t *testing.T) { } func TestGetIPPortFromHTTPRequest(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { diff --git a/pkg/utils/assertutil/assertutil_test.go b/pkg/utils/assertutil/assertutil_test.go index 84bd21cef05..076cdd2ac93 100644 --- a/pkg/utils/assertutil/assertutil_test.go +++ b/pkg/utils/assertutil/assertutil_test.go @@ -22,7 +22,6 @@ import ( ) func TestNilFail(t *testing.T) { - t.Parallel() re := require.New(t) var failErr error checker := NewChecker() diff --git a/pkg/utils/etcdutil/etcdutil_test.go b/pkg/utils/etcdutil/etcdutil_test.go index e02615b695f..6ddeafe4573 100644 --- a/pkg/utils/etcdutil/etcdutil_test.go +++ b/pkg/utils/etcdutil/etcdutil_test.go @@ -438,7 +438,7 @@ func (suite *loopWatcherTestSuite) TestLoadNoExistedKey() { cache[string(kv.Key)] = struct{}{} return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, false, /* withPrefix */ ) @@ -466,7 +466,7 @@ func (suite *loopWatcherTestSuite) TestLoadWithLimitChange() { cache[string(kv.Key)] = struct{}{} return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, true, /* withPrefix */ ) @@ -559,7 +559,7 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLimit() { cache = append(cache, string(kv.Key)) return nil }, - func(kv *mvccpb.KeyValue) error { + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { @@ -598,7 +598,7 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLargeKey() { cache = append(cache, string(kv.Key)) return nil }, - func(kv *mvccpb.KeyValue) error { + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { @@ -641,7 +641,7 @@ func (suite *loopWatcherTestSuite) TestWatcherBreak() { } return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, false, /* withPrefix */ ) @@ -719,8 +719,8 @@ func (suite *loopWatcherTestSuite) TestWatcherRequestProgress() { "test", "TestWatcherChanBlock", func([]*clientv3.Event) error { return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, false, /* withPrefix */ ) diff --git a/pkg/utils/etcdutil/health_checker.go b/pkg/utils/etcdutil/health_checker.go index 51c1808de4a..44bddd8b183 100644 --- a/pkg/utils/etcdutil/health_checker.go +++ b/pkg/utils/etcdutil/health_checker.go @@ -146,7 +146,7 @@ func (checker *healthChecker) inspector(ctx context.Context) { } func (checker *healthChecker) close() { - checker.healthyClients.Range(func(key, value any) bool { + checker.healthyClients.Range(func(_, value any) bool 
{ healthyCli := value.(*healthyClient) healthyCli.healthState.Set(0) healthyCli.Client.Close() @@ -382,7 +382,7 @@ func (checker *healthChecker) update() { } } // Clean up the stale clients which are not in the etcd cluster anymore. - checker.healthyClients.Range(func(key, value any) bool { + checker.healthyClients.Range(func(key, _ any) bool { ep := key.(string) if _, ok := epMap[ep]; !ok { log.Info("remove stale etcd client", diff --git a/pkg/utils/grpcutil/grpcutil_test.go b/pkg/utils/grpcutil/grpcutil_test.go index 21b7e1a4acb..2cbff4f3ebc 100644 --- a/pkg/utils/grpcutil/grpcutil_test.go +++ b/pkg/utils/grpcutil/grpcutil_test.go @@ -37,7 +37,6 @@ func TestToTLSConfig(t *testing.T) { } }() - t.Parallel() re := require.New(t) tlsConfig := TLSConfig{ KeyPath: path.Join(certPath, "pd-server-key.pem"), diff --git a/pkg/utils/jsonutil/jsonutil_test.go b/pkg/utils/jsonutil/jsonutil_test.go index a046fbaf70a..1e8c21917ba 100644 --- a/pkg/utils/jsonutil/jsonutil_test.go +++ b/pkg/utils/jsonutil/jsonutil_test.go @@ -31,7 +31,6 @@ type testJSONStructLevel2 struct { } func TestJSONUtil(t *testing.T) { - t.Parallel() re := require.New(t) father := &testJSONStructLevel1{ Name: "father", diff --git a/pkg/utils/keyutil/util_test.go b/pkg/utils/keyutil/util_test.go index 374faa1f797..7bcb0a49c6f 100644 --- a/pkg/utils/keyutil/util_test.go +++ b/pkg/utils/keyutil/util_test.go @@ -21,7 +21,6 @@ import ( ) func TestKeyUtil(t *testing.T) { - t.Parallel() re := require.New(t) startKey := []byte("a") endKey := []byte("b") diff --git a/pkg/utils/logutil/log.go b/pkg/utils/logutil/log.go index 8c0977818fa..ff6ffa7af9a 100644 --- a/pkg/utils/logutil/log.go +++ b/pkg/utils/logutil/log.go @@ -149,7 +149,7 @@ type stringer struct { } // String implement fmt.Stringer -func (s stringer) String() string { +func (stringer) String() string { return "?" 
} diff --git a/pkg/utils/logutil/log_test.go b/pkg/utils/logutil/log_test.go index 7d4be7a88bd..650ba62fe9d 100644 --- a/pkg/utils/logutil/log_test.go +++ b/pkg/utils/logutil/log_test.go @@ -23,7 +23,6 @@ import ( ) func TestStringToZapLogLevel(t *testing.T) { - t.Parallel() re := require.New(t) re.Equal(zapcore.FatalLevel, StringToZapLogLevel("fatal")) re.Equal(zapcore.ErrorLevel, StringToZapLogLevel("ERROR")) @@ -35,7 +34,6 @@ func TestStringToZapLogLevel(t *testing.T) { } func TestRedactLog(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { name string diff --git a/pkg/utils/metricutil/metricutil_test.go b/pkg/utils/metricutil/metricutil_test.go index b817eb0112d..a5c183abc20 100644 --- a/pkg/utils/metricutil/metricutil_test.go +++ b/pkg/utils/metricutil/metricutil_test.go @@ -23,7 +23,6 @@ import ( ) func TestCamelCaseToSnakeCase(t *testing.T) { - t.Parallel() re := require.New(t) inputs := []struct { name string @@ -56,7 +55,7 @@ func TestCamelCaseToSnakeCase(t *testing.T) { } } -func TestCoverage(t *testing.T) { +func TestCoverage(_ *testing.T) { cfgs := []*MetricConfig{ { PushJob: "j1", diff --git a/pkg/utils/netutil/address_test.go b/pkg/utils/netutil/address_test.go index faa3e2e1d04..127c9a6d0f7 100644 --- a/pkg/utils/netutil/address_test.go +++ b/pkg/utils/netutil/address_test.go @@ -22,7 +22,6 @@ import ( ) func TestResolveLoopBackAddr(t *testing.T) { - t.Parallel() re := require.New(t) nodes := []struct { address string @@ -40,7 +39,6 @@ func TestResolveLoopBackAddr(t *testing.T) { } func TestIsEnableHttps(t *testing.T) { - t.Parallel() re := require.New(t) re.False(IsEnableHTTPS(http.DefaultClient)) httpClient := &http.Client{ diff --git a/pkg/utils/reflectutil/tag_test.go b/pkg/utils/reflectutil/tag_test.go index f613f1f81b6..3e49e093912 100644 --- a/pkg/utils/reflectutil/tag_test.go +++ b/pkg/utils/reflectutil/tag_test.go @@ -35,7 +35,6 @@ type testStruct3 struct { } func TestFindJSONFullTagByChildTag(t *testing.T) { - t.Parallel() re := require.New(t) key := "enable" result := FindJSONFullTagByChildTag(reflect.TypeOf(testStruct1{}), key) @@ -51,7 +50,6 @@ func TestFindJSONFullTagByChildTag(t *testing.T) { } func TestFindSameFieldByJSON(t *testing.T) { - t.Parallel() re := require.New(t) input := map[string]any{ "name": "test2", @@ -65,7 +63,6 @@ func TestFindSameFieldByJSON(t *testing.T) { } func TestFindFieldByJSONTag(t *testing.T) { - t.Parallel() re := require.New(t) t1 := testStruct1{} t2 := testStruct2{} diff --git a/pkg/utils/requestutil/context_test.go b/pkg/utils/requestutil/context_test.go index 298fc1ff8a3..e6bdcd7be46 100644 --- a/pkg/utils/requestutil/context_test.go +++ b/pkg/utils/requestutil/context_test.go @@ -24,7 +24,6 @@ import ( ) func TestRequestInfo(t *testing.T) { - t.Parallel() re := require.New(t) ctx := context.Background() _, ok := RequestInfoFrom(ctx) @@ -53,7 +52,6 @@ func TestRequestInfo(t *testing.T) { } func TestEndTime(t *testing.T) { - t.Parallel() re := require.New(t) ctx := context.Background() _, ok := EndTimeFrom(ctx) diff --git a/pkg/utils/tempurl/check_env_dummy.go b/pkg/utils/tempurl/check_env_dummy.go index 85f527ea6fe..58d889bbfd6 100644 --- a/pkg/utils/tempurl/check_env_dummy.go +++ b/pkg/utils/tempurl/check_env_dummy.go @@ -16,6 +16,6 @@ package tempurl -func environmentCheck(addr string) bool { +func environmentCheck(_ string) bool { return true } diff --git a/pkg/utils/testutil/api_check.go b/pkg/utils/testutil/api_check.go index 5356d18514b..0b714204500 100644 --- a/pkg/utils/testutil/api_check.go +++ 
b/pkg/utils/testutil/api_check.go @@ -100,7 +100,8 @@ func ReadGetJSONWithBody(re *require.Assertions, client *http.Client, url string if err != nil { return err } - return checkResp(resp, StatusOK(re), ExtractJSON(re, data)) + checkOpts = append(checkOpts, StatusOK(re), ExtractJSON(re, data)) + return checkResp(resp, checkOpts...) } // CheckPostJSON is used to do post request and do check options. diff --git a/pkg/utils/tsoutil/tso_dispatcher.go b/pkg/utils/tsoutil/tso_dispatcher.go index 6d1ee2ace28..9dfb2515dc1 100644 --- a/pkg/utils/tsoutil/tso_dispatcher.go +++ b/pkg/utils/tsoutil/tso_dispatcher.go @@ -128,7 +128,7 @@ func (s *TSODispatcher) dispatch( case <-dispatcherCtx.Done(): return } - err = s.processRequests(forwardStream, requests[:pendingTSOReqCount], tsoProtoFactory) + err = s.processRequests(forwardStream, requests[:pendingTSOReqCount]) close(done) if err != nil { log.Error("proxy forward tso error", @@ -155,7 +155,7 @@ func (s *TSODispatcher) dispatch( } } -func (s *TSODispatcher) processRequests(forwardStream stream, requests []Request, tsoProtoFactory ProtoFactory) error { +func (s *TSODispatcher) processRequests(forwardStream stream, requests []Request) error { // Merge the requests count := uint32(0) for _, request := range requests { @@ -163,7 +163,7 @@ func (s *TSODispatcher) processRequests(forwardStream stream, requests []Request } start := time.Now() - resp, err := requests[0].process(forwardStream, count, tsoProtoFactory) + resp, err := requests[0].process(forwardStream, count) if err != nil { return err } @@ -184,7 +184,7 @@ func addLogical(logical, count int64, suffixBits uint32) int64 { return logical + count<<suffixBits } diff --git a/tests/integrations/Makefile b/tests/integrations/Makefile --- a/tests/integrations/Makefile +++ b/tests/integrations/Makefile @@ static: install-tools @ gofmt -s -l -d . 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." @ golangci-lint run -c $(ROOT_PATH)/.golangci.yml --verbose ./... --allow-parallel-runners - @ echo "revive ..." - @ revive -formatter friendly -config $(ROOT_PATH)/revive.toml ./...
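
The Makefile hunk above drops the standalone revive step now that revive runs as a golangci-lint linter, and most of the test-file churn in the rest of this patch is mechanical fallout from its rules. A minimal, self-contained sketch of the two rules behind the bulk of the renames; noop and setupCliSketch are hypothetical names for illustration, not helpers from this patch:

package main

import (
	"context"
	"fmt"
)

// unused-parameter: a parameter that is never read is blanked to "_",
// which is why the many func(kv *mvccpb.KeyValue) callbacks above become
// func(*mvccpb.KeyValue) and unused serverName parameters become _.
func noop(_ string) error { return nil }

// context-as-argument: context.Context should be the first parameter,
// which is why setupCli(re, ctx, ...) flips to setupCli(ctx, re, ...)
// throughout the client tests below.
func setupCliSketch(ctx context.Context, deps ...any) error {
	_ = deps
	return ctx.Err()
}

func main() {
	fmt.Println(noop("x"), setupCliSketch(context.Background(), "endpoint"))
}
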
tidy: @ go mod tidy diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index da4be99638d..10be418c029 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -114,7 +114,7 @@ func TestClientLeaderChange(t *testing.T) { for i := range endpointsWithWrongURL { endpointsWithWrongURL[i] = "https://" + strings.TrimPrefix(endpointsWithWrongURL[i], "http://") } - cli := setupCli(re, ctx, endpointsWithWrongURL) + cli := setupCli(ctx, re, endpointsWithWrongURL) defer cli.Close() innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) @@ -175,7 +175,7 @@ func TestLeaderTransferAndMoveCluster(t *testing.T) { }() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() var lastTS uint64 @@ -287,7 +287,7 @@ func TestTSOAllocatorLeader(t *testing.T) { }) allocatorLeaderMap[dcLocation] = pdName } - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) @@ -321,9 +321,9 @@ func TestTSOFollowerProxy(t *testing.T) { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli1 := setupCli(re, ctx, endpoints) + cli1 := setupCli(ctx, re, endpoints) defer cli1.Close() - cli2 := setupCli(re, ctx, endpoints) + cli2 := setupCli(ctx, re, endpoints) defer cli2.Close() cli2.UpdateOption(pd.EnableTSOFollowerProxy, true) @@ -385,7 +385,7 @@ func TestUnavailableTimeAfterLeaderIsReady(t *testing.T) { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() var wg sync.WaitGroup @@ -417,7 +417,7 @@ func TestUnavailableTimeAfterLeaderIsReady(t *testing.T) { leader.Stop() re.NotEmpty(cluster.WaitLeader()) leaderReadyTime = time.Now() - cluster.RunServers([]*tests.TestServer{leader}) + tests.RunServers([]*tests.TestServer{leader}) }() wg.Wait() re.Less(maxUnavailableTime.UnixMilli(), leaderReadyTime.Add(1*time.Second).UnixMilli()) @@ -458,14 +458,14 @@ func TestGlobalAndLocalTSO(t *testing.T) { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() // Wait for all nodes becoming healthy. 
time.Sleep(time.Second * 5) // Join a new dc-location - pd4, err := cluster.Join(ctx, func(conf *config.Config, serverName string) { + pd4, err := cluster.Join(ctx, func(conf *config.Config, _ string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = "dc-4" }) @@ -586,7 +586,7 @@ func TestCustomTimeout(t *testing.T) { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints, pd.WithCustomTimeoutOption(time.Second)) + cli := setupCli(ctx, re, endpoints, pd.WithCustomTimeoutOption(time.Second)) defer cli.Close() start := time.Now() @@ -647,8 +647,7 @@ func (suite *followerForwardAndHandleTestSuite) SetupSuite() { }) } -func (suite *followerForwardAndHandleTestSuite) TearDownTest() { -} +func (*followerForwardAndHandleTestSuite) TearDownTest() {} func (suite *followerForwardAndHandleTestSuite) TearDownSuite() { suite.cluster.Destroy() @@ -660,7 +659,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionByFollowerForwardin ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + cli := setupCli(ctx, re, suite.endpoints, pd.WithForwardingOption(true)) defer cli.Close() re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork1", "return(true)")) time.Sleep(200 * time.Millisecond) @@ -680,7 +679,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTsoByFollowerForwarding1( re := suite.Require() ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + cli := setupCli(ctx, re, suite.endpoints, pd.WithForwardingOption(true)) defer cli.Close() re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork", "return(true)")) @@ -715,7 +714,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTsoByFollowerForwarding2( re := suite.Require() ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + cli := setupCli(ctx, re, suite.endpoints, pd.WithForwardingOption(true)) defer cli.Close() re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork", "return(true)")) @@ -752,7 +751,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTsoAndRegionByFollowerFor follower := cluster.GetServer(cluster.GetFollower()) re.NoError(failpoint.Enable("github.com/tikv/pd/client/grpcutil/unreachableNetwork2", fmt.Sprintf("return(\"%s\")", follower.GetAddr()))) - cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + cli := setupCli(ctx, re, suite.endpoints, pd.WithForwardingOption(true)) defer cli.Close() var lastTS uint64 testutil.Eventually(re, func() bool { @@ -821,7 +820,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromLeaderWhenNetwo follower := cluster.GetServer(cluster.GetFollower()) re.NoError(failpoint.Enable("github.com/tikv/pd/client/grpcutil/unreachableNetwork2", fmt.Sprintf("return(\"%s\")", follower.GetAddr()))) - cli := setupCli(re, ctx, suite.endpoints) + cli := setupCli(ctx, re, suite.endpoints) defer cli.Close() cluster.GetLeaderServer().GetServer().GetMember().ResignEtcdLeader(ctx, leader.GetServer().Name(), follower.GetServer().Name()) @@ -854,7 +853,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromFollower() { defer cancel() cluster := suite.cluster - cli := setupCli(re, ctx, suite.endpoints) + cli := setupCli(ctx, re, suite.endpoints) defer cli.Close() 
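
Another recurring rewrite, visible in TearDownTest above and in bootstrapServer, getEtcdPath, checkRuleResult, waitLeader, and others below: a suite method that never reads its receiver becomes a package-level function, and a method that must stay a method drops only the unused receiver name. A minimal sketch assuming a hypothetical suiteLike type:

package main

import "fmt"

type suiteLike struct{}

// A helper that only used the suite for namespacing and never touched the
// receiver becomes a plain function.
// Before: func (suite *suiteLike) getPath(p string) string
func getPath(p string) string { return "/prefix/" + p }

// A method kept to satisfy an interface or the testify suite contract keeps
// the receiver type but drops the unused name, as with TearDownTest above.
func (suiteLike) String() string { return "?" }

func main() {
	// Prints "/prefix/source_id ?" — Println invokes the String method.
	fmt.Println(getPath("source_id"), suiteLike{})
}
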
cli.UpdateOption(pd.EnableFollowerHandle, true) re.NotEmpty(cluster.WaitLeader()) @@ -949,7 +948,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTSFuture() { re.NoError(failpoint.Enable("github.com/tikv/pd/client/shortDispatcherChannel", "return(true)")) - cli := setupCli(re, ctx, suite.endpoints) + cli := setupCli(ctx, re, suite.endpoints) ctxs := make([]context.Context, 20) cancels := make([]context.CancelFunc, 20) @@ -1015,7 +1014,7 @@ func runServer(re *require.Assertions, cluster *tests.TestCluster) []string { return endpoints } -func setupCli(re *require.Assertions, ctx context.Context, endpoints []string, opts ...pd.ClientOption) pd.Client { +func setupCli(ctx context.Context, re *require.Assertions, endpoints []string, opts ...pd.ClientOption) pd.Client { cli, err := pd.NewClientWithContext(ctx, endpoints, pd.SecurityOption{}, opts...) re.NoError(err) return cli @@ -1083,7 +1082,7 @@ func TestCloseClient(t *testing.T) { re.NoError(err) defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) ts := cli.GetTSAsync(context.TODO()) time.Sleep(time.Second) cli.Close() @@ -1171,10 +1170,10 @@ func (suite *clientTestSuite) SetupSuite() { suite.grpcSvr = &server.GrpcServer{Server: suite.srv} server.MustWaitLeader(re, []*server.Server{suite.srv}) - suite.bootstrapServer(re, newHeader(suite.srv), suite.grpcPDClient) + bootstrapServer(re, newHeader(suite.srv), suite.grpcPDClient) suite.ctx, suite.clean = context.WithCancel(context.Background()) - suite.client = setupCli(re, suite.ctx, suite.srv.GetEndpoints()) + suite.client = setupCli(suite.ctx, re, suite.srv.GetEndpoints()) suite.regionHeartbeat, err = suite.grpcPDClient.RegionHeartbeat(suite.ctx) re.NoError(err) @@ -1216,7 +1215,7 @@ func newHeader(srv *server.Server) *pdpb.RequestHeader { } } -func (suite *clientTestSuite) bootstrapServer(re *require.Assertions, header *pdpb.RequestHeader, client pdpb.PDClient) { +func bootstrapServer(re *require.Assertions, header *pdpb.RequestHeader, client pdpb.PDClient) { regionID := regionIDAllocator.alloc() region := &metapb.Region{ Id: regionID, @@ -1781,7 +1780,7 @@ func TestWatch(t *testing.T) { re.NoError(err) defer cluster.Destroy() endpoints := runServer(re, cluster) - client := setupCli(re, ctx, endpoints) + client := setupCli(ctx, re, endpoints) defer client.Close() key := "test" @@ -1824,7 +1823,7 @@ func TestPutGet(t *testing.T) { re.NoError(err) defer cluster.Destroy() endpoints := runServer(re, cluster) - client := setupCli(re, ctx, endpoints) + client := setupCli(ctx, re, endpoints) defer client.Close() key := []byte("test") @@ -1859,7 +1858,7 @@ func TestClientWatchWithRevision(t *testing.T) { re.NoError(err) defer cluster.Destroy() endpoints := runServer(re, cluster) - client := setupCli(re, ctx, endpoints) + client := setupCli(ctx, re, endpoints) defer client.Close() s := cluster.GetLeaderServer() watchPrefix := "watch_test" @@ -1927,7 +1926,7 @@ func (suite *clientTestSuite) TestMemberUpdateBackOff() { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) diff --git a/tests/integrations/client/client_tls_test.go b/tests/integrations/client/client_tls_test.go index bdfe050bf45..a5f0f5b200d 100644 --- a/tests/integrations/client/client_tls_test.go +++ b/tests/integrations/client/client_tls_test.go @@ -120,18 
+120,18 @@ func TestTLSReloadAtomicReplace(t *testing.T) { err = os.Rename(certsDirExp, certsDir) re.NoError(err) } - testTLSReload(re, ctx, cloneFunc, replaceFunc, revertFunc) + testTLSReload(ctx, re, cloneFunc, replaceFunc, revertFunc) } func testTLSReload( - re *require.Assertions, ctx context.Context, + re *require.Assertions, cloneFunc func() transport.TLSInfo, replaceFunc func(), revertFunc func()) { tlsInfo := cloneFunc() // 1. start cluster with valid certs - clus, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + clus, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Security.TLSConfig = grpcutil.TLSConfig{ KeyPath: tlsInfo.KeyFile, CertPath: tlsInfo.CertFile, diff --git a/tests/integrations/client/gc_client_test.go b/tests/integrations/client/gc_client_test.go index 737fd09a08f..0913579f47e 100644 --- a/tests/integrations/client/gc_client_test.go +++ b/tests/integrations/client/gc_client_test.go @@ -135,6 +135,7 @@ func (suite *gcClientTestSuite) TestClientWatchWithRevision() { suite.testClientWatchWithRevision(true) } +// nolint:revive func (suite *gcClientTestSuite) testClientWatchWithRevision(fromNewRevision bool) { re := suite.Require() testKeyspaceID := uint32(100) diff --git a/tests/integrations/client/global_config_test.go b/tests/integrations/client/global_config_test.go index c52a35159b0..d813ec99676 100644 --- a/tests/integrations/client/global_config_test.go +++ b/tests/integrations/client/global_config_test.go @@ -89,7 +89,7 @@ func (suite *globalConfigTestSuite) TearDownSuite() { suite.client.Close() } -func (suite *globalConfigTestSuite) GetEtcdPath(configPath string) string { +func getEtcdPath(configPath string) string { return globalConfigPath + configPath } @@ -97,10 +97,10 @@ func (suite *globalConfigTestSuite) TestLoadWithoutNames() { re := suite.Require() defer func() { // clean up - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("test")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("test")) re.NoError(err) }() - r, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath("test"), "test") + r, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath("test"), "test") re.NoError(err) res, err := suite.server.LoadGlobalConfig(suite.server.Context(), &pdpb.LoadGlobalConfigRequest{ ConfigPath: globalConfigPath, @@ -115,10 +115,10 @@ func (suite *globalConfigTestSuite) TestLoadWithoutConfigPath() { re := suite.Require() defer func() { // clean up - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("source_id")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("source_id")) re.NoError(err) }() - _, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath("source_id"), "1") + _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath("source_id"), "1") re.NoError(err) res, err := suite.server.LoadGlobalConfig(suite.server.Context(), &pdpb.LoadGlobalConfigRequest{ Names: []string{"source_id"}, @@ -132,7 +132,7 @@ func (suite *globalConfigTestSuite) TestLoadOtherConfigPath() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() @@ -155,7 +155,7 @@ func (suite 
*globalConfigTestSuite) TestLoadAndStore() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("test")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("test")) re.NoError(err) } }() @@ -171,7 +171,7 @@ func (suite *globalConfigTestSuite) TestLoadAndStore() { re.Len(res.Items, 3) re.NoError(err) for i, item := range res.Items { - re.Equal(&pdpb.GlobalConfigItem{Kind: pdpb.EventType_PUT, Name: suite.GetEtcdPath(strconv.Itoa(i)), Payload: []byte(strconv.Itoa(i))}, item) + re.Equal(&pdpb.GlobalConfigItem{Kind: pdpb.EventType_PUT, Name: getEtcdPath(strconv.Itoa(i)), Payload: []byte(strconv.Itoa(i))}, item) } } @@ -179,7 +179,7 @@ func (suite *globalConfigTestSuite) TestStore() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("test")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("test")) re.NoError(err) } }() @@ -190,9 +190,9 @@ func (suite *globalConfigTestSuite) TestStore() { }) re.NoError(err) for i := 0; i < 3; i++ { - res, err := suite.server.GetClient().Get(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + res, err := suite.server.GetClient().Get(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) - re.Equal(suite.GetEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) + re.Equal(getEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) } } @@ -201,7 +201,7 @@ func (suite *globalConfigTestSuite) TestWatch() { defer func() { for i := 0; i < 3; i++ { // clean up - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() @@ -213,11 +213,11 @@ func (suite *globalConfigTestSuite) TestWatch() { Revision: 0, }, server) for i := 0; i < 6; i++ { - _, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) + _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } for i := 3; i < 6; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } res, err := suite.server.LoadGlobalConfig(suite.server.Context(), &pdpb.LoadGlobalConfigRequest{ @@ -231,29 +231,29 @@ func (suite *globalConfigTestSuite) TestClientLoadWithoutNames() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) + _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } res, _, err := suite.client.LoadGlobalConfig(suite.server.Context(), nil, globalConfigPath) re.NoError(err) re.Len(res, 3) for i, item := range res { - re.Equal(pd.GlobalConfigItem{EventType: pdpb.EventType_PUT, Name: suite.GetEtcdPath(strconv.Itoa(i)), PayLoad: 
[]byte(strconv.Itoa(i)), Value: strconv.Itoa(i)}, item) + re.Equal(pd.GlobalConfigItem{EventType: pdpb.EventType_PUT, Name: getEtcdPath(strconv.Itoa(i)), PayLoad: []byte(strconv.Itoa(i)), Value: strconv.Itoa(i)}, item) } } func (suite *globalConfigTestSuite) TestClientLoadWithoutConfigPath() { re := suite.Require() defer func() { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("source_id")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("source_id")) re.NoError(err) }() - _, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath("source_id"), "1") + _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath("source_id"), "1") re.NoError(err) res, _, err := suite.client.LoadGlobalConfig(suite.server.Context(), []string{"source_id"}, "") re.NoError(err) @@ -265,7 +265,7 @@ func (suite *globalConfigTestSuite) TestClientLoadOtherConfigPath() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() @@ -285,7 +285,7 @@ func (suite *globalConfigTestSuite) TestClientStore() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() @@ -293,9 +293,9 @@ func (suite *globalConfigTestSuite) TestClientStore() { []pd.GlobalConfigItem{{Name: "0", Value: "0"}, {Name: "1", Value: "1"}, {Name: "2", Value: "2"}}) re.NoError(err) for i := 0; i < 3; i++ { - res, err := suite.server.GetClient().Get(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + res, err := suite.server.GetClient().Get(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) - re.Equal(suite.GetEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) + re.Equal(getEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) } } @@ -303,25 +303,25 @@ func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { re := suite.Require() ctx := suite.server.Context() defer func() { - _, err := suite.server.GetClient().Delete(ctx, suite.GetEtcdPath("test")) + _, err := suite.server.GetClient().Delete(ctx, getEtcdPath("test")) re.NoError(err) for i := 3; i < 9; i++ { - _, err := suite.server.GetClient().Delete(ctx, suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(ctx, getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() // Mock get revision by loading - r, err := suite.server.GetClient().Put(ctx, suite.GetEtcdPath("test"), "test") + r, err := suite.server.GetClient().Put(ctx, getEtcdPath("test"), "test") re.NoError(err) res, revision, err := suite.client.LoadGlobalConfig(ctx, nil, globalConfigPath) re.NoError(err) re.Len(res, 1) suite.LessOrEqual(r.Header.GetRevision(), revision) - re.Equal(pd.GlobalConfigItem{EventType: pdpb.EventType_PUT, Name: suite.GetEtcdPath("test"), PayLoad: []byte("test"), Value: "test"}, res[0]) + re.Equal(pd.GlobalConfigItem{EventType: pdpb.EventType_PUT, Name: getEtcdPath("test"), PayLoad: []byte("test"), Value: "test"}, res[0]) // Mock when start watcher there are existed some keys, will load firstly for i := 0; i < 6; i++ { - _, err = suite.server.GetClient().Put(suite.server.Context(), 
suite.GetEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) + _, err = suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } // Start watcher at next revision @@ -329,12 +329,12 @@ func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { re.NoError(err) // Mock delete for i := 0; i < 3; i++ { - _, err = suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err = suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } // Mock put for i := 6; i < 9; i++ { - _, err = suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) + _, err = suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } timer := time.NewTimer(time.Second) @@ -347,7 +347,7 @@ func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { return case res := <-configChan: for _, r := range res { - re.Equal(suite.GetEtcdPath(r.Value), r.Name) + re.Equal(getEtcdPath(r.Value), r.Name) } runTest = true } diff --git a/tests/integrations/client/http_client_test.go b/tests/integrations/client/http_client_test.go index 9efbc587847..d35b7f00584 100644 --- a/tests/integrations/client/http_client_test.go +++ b/tests/integrations/client/http_client_test.go @@ -121,7 +121,7 @@ func (suite *httpClientTestSuite) TearDownSuite() { // RunTestInTwoModes is to run test in two modes. func (suite *httpClientTestSuite) RunTestInTwoModes(test func(mode mode, client pd.Client)) { // Run test with specific service discovery. - cli := setupCli(suite.Require(), suite.env[specificServiceDiscovery].ctx, suite.env[specificServiceDiscovery].endpoints) + cli := setupCli(suite.env[specificServiceDiscovery].ctx, suite.Require(), suite.env[specificServiceDiscovery].endpoints) sd := cli.GetServiceDiscovery() client := pd.NewClientWithServiceDiscovery("pd-http-client-it-grpc", sd) test(specificServiceDiscovery, client) @@ -268,7 +268,7 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { re.NoError(err) re.Equal(bundles[0], bundle) // Check if we have the default rule. - suite.checkRuleResult(re, env, client, &pd.Rule{ + checkRuleResult(re, env, client, &pd.Rule{ GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: pd.Voter, @@ -277,7 +277,7 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { EndKey: []byte{}, }, 1, true) // Should be the same as the rules in the bundle. 
- suite.checkRuleResult(re, env, client, bundle.Rules[0], 1, true) + checkRuleResult(re, env, client, bundle.Rules[0], 1, true) testRule := &pd.Rule{ GroupID: placement.DefaultGroupID, ID: "test", @@ -288,24 +288,24 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { } err = client.SetPlacementRule(env.ctx, testRule) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 2, true) + checkRuleResult(re, env, client, testRule, 2, true) err = client.DeletePlacementRule(env.ctx, placement.DefaultGroupID, "test") re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 1, false) + checkRuleResult(re, env, client, testRule, 1, false) testRuleOp := &pd.RuleOp{ Rule: testRule, Action: pd.RuleOpAdd, } err = client.SetPlacementRuleInBatch(env.ctx, []*pd.RuleOp{testRuleOp}) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 2, true) + checkRuleResult(re, env, client, testRule, 2, true) testRuleOp = &pd.RuleOp{ Rule: testRule, Action: pd.RuleOpDel, } err = client.SetPlacementRuleInBatch(env.ctx, []*pd.RuleOp{testRuleOp}) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 1, false) + checkRuleResult(re, env, client, testRule, 1, false) err = client.SetPlacementRuleBundles(env.ctx, []*pd.GroupBundle{ { ID: placement.DefaultGroupID, @@ -313,7 +313,7 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { }, }, true) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 1, true) + checkRuleResult(re, env, client, testRule, 1, true) ruleGroups, err := client.GetAllPlacementRuleGroups(env.ctx) re.NoError(err) re.Len(ruleGroups, 1) @@ -347,10 +347,10 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { } err = client.SetPlacementRule(env.ctx, testRule) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 1, true) + checkRuleResult(re, env, client, testRule, 1, true) } -func (suite *httpClientTestSuite) checkRuleResult( +func checkRuleResult( re *require.Assertions, env *httpClientTestEnv, client pd.Client, @@ -724,7 +724,7 @@ func (suite *httpClientTestSuite) TestRedirectWithMetrics() { re := suite.Require() env := suite.env[defaultServiceDiscovery] - cli := setupCli(suite.Require(), env.ctx, env.endpoints) + cli := setupCli(env.ctx, suite.Require(), env.endpoints) defer cli.Close() sd := cli.GetServiceDiscovery() diff --git a/tests/integrations/mcs/discovery/register_test.go b/tests/integrations/mcs/discovery/register_test.go index 1b61a264232..69d53463818 100644 --- a/tests/integrations/mcs/discovery/register_test.go +++ b/tests/integrations/mcs/discovery/register_test.go @@ -124,9 +124,15 @@ func (suite *serverRegisterTestSuite) checkServerPrimaryChange(serviceName strin re.Empty(primary) serverMap := make(map[string]bs.Server) + var cleanups []func() + defer func() { + for _, cleanup := range cleanups { + cleanup() + } + }() for i := 0; i < serverNum; i++ { s, cleanup := suite.addServer(serviceName) - defer cleanup() + cleanups = append(cleanups, cleanup) serverMap[s.GetAddr()] = s } diff --git a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go index ccec0a7cdc0..160eea167d6 100644 --- a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go +++ b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go @@ -89,9 +89,15 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() { re := suite.Require() // add three nodes. 
nodes := make(map[string]bs.Server) + var cleanups []func() + defer func() { + for _, cleanup := range cleanups { + cleanup() + } + }() for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount+1; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) - defer cleanup() + cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s } tests.WaitForPrimaryServing(re, nodes) @@ -139,9 +145,15 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() { func (suite *keyspaceGroupTestSuite) TestAllocReplica() { re := suite.Require() nodes := make(map[string]bs.Server) + var cleanups []func() + defer func() { + for _, cleanup := range cleanups { + cleanup() + } + }() for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) - defer cleanup() + cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s } tests.WaitForPrimaryServing(re, nodes) @@ -233,9 +245,15 @@ func (suite *keyspaceGroupTestSuite) TestSetNodes() { re := suite.Require() nodes := make(map[string]bs.Server) nodesList := []string{} + var cleanups []func() + defer func() { + for _, cleanup := range cleanups { + cleanup() + } + }() for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) - defer cleanup() + cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s nodesList = append(nodesList, s.GetAddr()) } @@ -294,9 +312,15 @@ func (suite *keyspaceGroupTestSuite) TestSetNodes() { func (suite *keyspaceGroupTestSuite) TestDefaultKeyspaceGroup() { re := suite.Require() nodes := make(map[string]bs.Server) + var cleanups []func() + defer func() { + for _, cleanup := range cleanups { + cleanup() + } + }() for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) - defer cleanup() + cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s } tests.WaitForPrimaryServing(re, nodes) diff --git a/tests/integrations/mcs/resourcemanager/resource_manager_test.go b/tests/integrations/mcs/resourcemanager/resource_manager_test.go index aea0441c7d7..17673213a97 100644 --- a/tests/integrations/mcs/resourcemanager/resource_manager_test.go +++ b/tests/integrations/mcs/resourcemanager/resource_manager_test.go @@ -34,6 +34,7 @@ import ( "github.com/tikv/pd/client/resource_group/controller" "github.com/tikv/pd/pkg/mcs/resourcemanager/server" "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/tests" "go.uber.org/goleak" @@ -77,7 +78,7 @@ func (suite *resourceManagerClientTestSuite) SetupSuite() { suite.client, err = pd.NewClientWithContext(suite.ctx, suite.cluster.GetConfig().GetClientURLs(), pd.SecurityOption{}) re.NoError(err) leader := suite.cluster.GetServer(suite.cluster.WaitLeader()) - suite.waitLeader(re, suite.client, leader.GetAddr()) + waitLeader(re, suite.client, leader.GetAddr()) suite.initGroups = []*rmpb.ResourceGroup{ { @@ -134,7 +135,7 @@ func (suite *resourceManagerClientTestSuite) SetupSuite() { } } -func (suite *resourceManagerClientTestSuite) waitLeader(re *require.Assertions, cli pd.Client, leaderAddr string) { +func waitLeader(re *require.Assertions, cli pd.Client, leaderAddr string) { innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) re.NotNil(innerCli) @@ -176,7 +177,7 @@ func 
(suite *resourceManagerClientTestSuite) resignAndWaitLeader(re *require.Ass re.NoError(suite.cluster.ResignLeader()) newLeader := suite.cluster.GetServer(suite.cluster.WaitLeader()) re.NotNil(newLeader) - suite.waitLeader(re, suite.client, newLeader.GetAddr()) + waitLeader(re, suite.client, newLeader.GetAddr()) } func (suite *resourceManagerClientTestSuite) TestWatchResourceGroup() { @@ -348,7 +349,7 @@ type tokenConsumptionPerSecond struct { waitDuration time.Duration } -func (t tokenConsumptionPerSecond) makeReadRequest() *controller.TestRequestInfo { +func (tokenConsumptionPerSecond) makeReadRequest() *controller.TestRequestInfo { return controller.NewTestRequestInfo(false, 0, 0) } @@ -364,7 +365,7 @@ func (t tokenConsumptionPerSecond) makeReadResponse() *controller.TestResponseIn ) } -func (t tokenConsumptionPerSecond) makeWriteResponse() *controller.TestResponseInfo { +func (tokenConsumptionPerSecond) makeWriteResponse() *controller.TestResponseInfo { return controller.NewTestResponseInfo( 0, time.Duration(0), @@ -705,7 +706,6 @@ func (suite *resourceManagerClientTestSuite) TestResourcePenalty() { c.Stop() } -// nolint:gosec func (suite *resourceManagerClientTestSuite) TestAcquireTokenBucket() { re := suite.Require() cli := suite.client @@ -959,7 +959,7 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() { re.NoError(err) resp, err := http.Post(getAddr(i)+"/resource-manager/api/v1/config/group", "application/json", strings.NewReader(string(createJSON))) re.NoError(err) - defer resp.Body.Close() + resp.Body.Close() re.Equal(http.StatusOK, resp.StatusCode) if tcase.isNewGroup { finalNum++ @@ -974,7 +974,7 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() { req.Header.Set("Content-Type", "application/json") resp, err = http.DefaultClient.Do(req) re.NoError(err) - defer resp.Body.Close() + resp.Body.Close() if tcase.modifySuccess { re.Equal(http.StatusOK, resp.StatusCode) } else { @@ -984,9 +984,9 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() { // Get Resource Group resp, err = http.Get(getAddr(i) + "/resource-manager/api/v1/config/group/" + tcase.name) re.NoError(err) - defer resp.Body.Close() re.Equal(http.StatusOK, resp.StatusCode) respString, err := io.ReadAll(resp.Body) + resp.Body.Close() re.NoError(err) re.Contains(string(respString), tcase.name) if tcase.modifySuccess { @@ -997,9 +997,9 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() { if i == len(testCasesSet1)-1 { resp, err := http.Get(getAddr(i) + "/resource-manager/api/v1/config/groups") re.NoError(err) - defer resp.Body.Close() re.Equal(http.StatusOK, resp.StatusCode) respString, err := io.ReadAll(resp.Body) + resp.Body.Close() re.NoError(err) groups := make([]*server.ResourceGroup, 0) json.Unmarshal(respString, &groups) @@ -1011,8 +1011,8 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() { re.NoError(err) resp, err := http.DefaultClient.Do(req) re.NoError(err) - defer resp.Body.Close() respString, err := io.ReadAll(resp.Body) + resp.Body.Close() re.NoError(err) if g.Name == "default" { re.Contains(string(respString), "cannot delete reserved group") @@ -1025,9 +1025,9 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() { // verify again resp1, err := http.Get(getAddr(i) + "/resource-manager/api/v1/config/groups") re.NoError(err) - defer resp1.Body.Close() re.Equal(http.StatusOK, resp1.StatusCode) respString1, err := io.ReadAll(resp1.Body) + resp1.Body.Close() 
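
The hunks around this point move resp.Body.Close() out of defer, and the register and keyspace-group tests earlier collect per-iteration cleanup funcs into a slice drained once: a defer issued inside a loop runs only when the enclosing function returns, so every response body and test server would stay open until the end of the test. A minimal runnable sketch of both fixes, with hypothetical drainAll/startServers helpers standing in for the test code:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func drainAll(urls []string) error {
	for _, u := range urls {
		resp, err := http.Get(u)
		if err != nil {
			return err
		}
		// Before: defer resp.Body.Close() — every body stayed open until
		// drainAll returned. Closing per iteration releases it immediately.
		_, err = io.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return err
		}
	}
	return nil
}

// For teardown that must wait until the test ends, the patch appends each
// cleanup to a slice and drains it once, instead of defer cleanup() per loop.
func startServers(n int) (addrs []string, stopAll func()) {
	var cleanups []func()
	for i := 0; i < n; i++ {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			fmt.Fprintln(w, "ok")
		}))
		addrs = append(addrs, srv.URL)
		cleanups = append(cleanups, srv.Close) // was: defer srv.Close() in the loop
	}
	return addrs, func() {
		for _, c := range cleanups {
			c()
		}
	}
}

func main() {
	addrs, stopAll := startServers(3)
	defer stopAll()
	if err := drainAll(addrs); err != nil {
		fmt.Println("error:", err)
	}
}
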
re.NoError(err) groups1 := make([]server.ResourceGroup, 0) json.Unmarshal(respString1, &groups1) @@ -1045,7 +1045,7 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() { for _, s := range servers { serverList = append(serverList, s) } - re.NoError(suite.cluster.RunServers(serverList)) + re.NoError(tests.RunServers(serverList)) suite.cluster.WaitLeader() // re-connect client as well suite.client, err = pd.NewClientWithContext(suite.ctx, suite.cluster.GetConfig().GetClientURLs(), pd.SecurityOption{}) @@ -1313,9 +1313,8 @@ func (suite *resourceManagerClientTestSuite) TestCheckBackgroundJobs() { enableBackgroundGroup := func(enable bool) string { if enable { return "background_enable" - } else { - return "background_unable" } + return "background_unable" } // Mock add resource group. group := &rmpb.ResourceGroup{ @@ -1436,14 +1435,20 @@ func (suite *resourceManagerClientTestSuite) TestResourceGroupControllerConfigCh waitDuration := 10 * time.Second readBaseCost := 1.5 defaultCfg := controller.DefaultConfig() - // failpoint enableDegradedMode will setup and set it be 1s. - defaultCfg.DegradedModeWaitDuration.Duration = time.Second + expectCfg := server.ControllerConfig{ + // failpoint enableDegradedMode will setup and set it be 1s. + DegradedModeWaitDuration: typeutil.NewDuration(time.Second), + LTBMaxWaitDuration: typeutil.Duration(defaultCfg.LTBMaxWaitDuration), + RequestUnit: server.RequestUnitConfig(defaultCfg.RequestUnit), + EnableControllerTraceLog: defaultCfg.EnableControllerTraceLog, + } expectRUCfg := controller.GenerateRUConfig(defaultCfg) + expectRUCfg.DegradedModeWaitDuration = time.Second // initial config verification respString := sendRequest("GET", getAddr()+configURL, nil) - defaultString, err := json.Marshal(defaultCfg) + expectStr, err := json.Marshal(expectCfg) re.NoError(err) - re.JSONEq(string(respString), string(defaultString)) + re.JSONEq(string(respString), string(expectStr)) re.EqualValues(expectRUCfg, c1.GetConfig()) testCases := []struct { diff --git a/tests/integrations/mcs/tso/api_test.go b/tests/integrations/mcs/tso/api_test.go index 32725418462..dc9bfa1e291 100644 --- a/tests/integrations/mcs/tso/api_test.go +++ b/tests/integrations/mcs/tso/api_test.go @@ -141,7 +141,7 @@ func TestTSOServerStartFirst(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - apiCluster, err := tests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) { + apiCluster, err := tests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = []string{"k1", "k2"} }) defer apiCluster.Destroy() diff --git a/tests/integrations/mcs/tso/proxy_test.go b/tests/integrations/mcs/tso/proxy_test.go index 7ed329610f2..43877f262e2 100644 --- a/tests/integrations/mcs/tso/proxy_test.go +++ b/tests/integrations/mcs/tso/proxy_test.go @@ -84,7 +84,7 @@ func (s *tsoProxyTestSuite) SetupSuite() { } func (s *tsoProxyTestSuite) TearDownSuite() { - s.cleanupGRPCStreams(s.cleanupFuncs) + cleanupGRPCStreams(s.cleanupFuncs) s.tsoCluster.Destroy() s.apiCluster.Destroy() s.cancel() @@ -112,7 +112,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyWorksWithCancellation() { for j := 0; j < 10; j++ { s.verifyTSOProxy(s.ctx, streams, cleanupFuncs, 10, true) } - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) } }() for i := 0; i < 10; i++ { @@ -125,7 +125,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyWorksWithCancellation() { // TestTSOProxyStress tests the TSO Proxy can work correctly under the 
stress. gPRC and TSO failures are allowed, // but the TSO Proxy should not panic, blocked or deadlocked, and if it returns a timestamp, it should be a valid // timestamp monotonic increasing. After the stress, the TSO Proxy should still work correctly. -func TestTSOProxyStress(t *testing.T) { +func TestTSOProxyStress(_ *testing.T) { s := new(tsoProxyTestSuite) s.SetT(&testing.T{}) s.SetupSuite() @@ -154,7 +154,7 @@ func TestTSOProxyStress(t *testing.T) { cleanupFuncs = append(cleanupFuncs, cleanupFuncsTemp...) s.verifyTSOProxy(ctxTimeout, streams, cleanupFuncs, 50, false) } - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) log.Info("the stress test completed.") // Verify the TSO Proxy can still work correctly after the stress. @@ -192,7 +192,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyClientsWithSameContext() { } s.verifyTSOProxy(ctx, streams, cleanupFuncs, 100, true) - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) } // TestTSOProxyRecvFromClientTimeout tests the TSO Proxy can properly close the grpc stream on the server side @@ -207,7 +207,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyRecvFromClientTimeout() { time.Sleep(2 * time.Second) err := streams[0].Send(s.defaultReq) re.Error(err) - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) re.NoError(failpoint.Disable("github.com/tikv/pd/server/tsoProxyRecvFromClientTimeout")) // Verify the streams with no fault injection can work correctly. @@ -226,7 +226,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyFailToSendToClient() { re.NoError(err) _, err = streams[0].Recv() re.Error(err) - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) re.NoError(failpoint.Disable("github.com/tikv/pd/server/tsoProxyFailToSendToClient")) s.verifyTSOProxy(s.ctx, s.streams, s.cleanupFuncs, 1, true) @@ -244,7 +244,7 @@ func (s *tsoProxyTestSuite) TestTSOProxySendToTSOTimeout() { re.NoError(err) _, err = streams[0].Recv() re.Error(err) - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) re.NoError(failpoint.Disable("github.com/tikv/pd/server/tsoProxySendToTSOTimeout")) s.verifyTSOProxy(s.ctx, s.streams, s.cleanupFuncs, 1, true) @@ -262,13 +262,13 @@ func (s *tsoProxyTestSuite) TestTSOProxyRecvFromTSOTimeout() { re.NoError(err) _, err = streams[0].Recv() re.Error(err) - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) re.NoError(failpoint.Disable("github.com/tikv/pd/server/tsoProxyRecvFromTSOTimeout")) s.verifyTSOProxy(s.ctx, s.streams, s.cleanupFuncs, 1, true) } -func (s *tsoProxyTestSuite) cleanupGRPCStreams(cleanupFuncs []testutil.CleanupFunc) { +func cleanupGRPCStreams(cleanupFuncs []testutil.CleanupFunc) { for i := 0; i < len(cleanupFuncs); i++ { if cleanupFuncs[i] != nil { cleanupFuncs[i]() @@ -277,7 +277,7 @@ func (s *tsoProxyTestSuite) cleanupGRPCStreams(cleanupFuncs []testutil.CleanupFu } } -func (s *tsoProxyTestSuite) cleanupGRPCStream( +func cleanupGRPCStream( streams []pdpb.PD_TsoClient, cleanupFuncs []testutil.CleanupFunc, index int, ) { if cleanupFuncs[index] != nil { @@ -318,7 +318,7 @@ func (s *tsoProxyTestSuite) verifyTSOProxy( for j := 0; j < requestsPerClient; j++ { select { case <-ctx.Done(): - s.cleanupGRPCStream(streams, cleanupFuncs, i) + cleanupGRPCStream(streams, cleanupFuncs, i) return default: } @@ -327,14 +327,14 @@ func (s *tsoProxyTestSuite) verifyTSOProxy( err := streams[i].Send(req) if err != nil && !mustReliable { respErr.Store(err) - s.cleanupGRPCStream(streams, cleanupFuncs, i) + cleanupGRPCStream(streams, 
cleanupFuncs, i) return } re.NoError(err) resp, err := streams[i].Recv() if err != nil && !mustReliable { respErr.Store(err) - s.cleanupGRPCStream(streams, cleanupFuncs, i) + cleanupGRPCStream(streams, cleanupFuncs, i) return } re.NoError(err) @@ -495,7 +495,7 @@ func benchmarkTSOProxyNClients(clientCount int, b *testing.B) { } b.StopTimer() - suite.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) suite.TearDownSuite() } diff --git a/tests/integrations/realcluster/Makefile b/tests/integrations/realcluster/Makefile index 278f585feaa..e161d52a86e 100644 --- a/tests/integrations/realcluster/Makefile +++ b/tests/integrations/realcluster/Makefile @@ -22,8 +22,6 @@ static: install-tools @ gofmt -s -l -d . 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." @ golangci-lint run -c $(ROOT_PATH)/.golangci.yml --verbose ./... --allow-parallel-runners - @ echo "revive ..." - @ revive -formatter friendly -config $(ROOT_PATH)/revive.toml ./... tidy: @ go mod tidy diff --git a/tests/registry/registry_test.go b/tests/registry/registry_test.go index dab2ccae683..416a7420d2e 100644 --- a/tests/registry/registry_test.go +++ b/tests/registry/registry_test.go @@ -41,18 +41,18 @@ func TestMain(m *testing.M) { type testServiceRegistry struct { } -func (t *testServiceRegistry) RegisterGRPCService(g *grpc.Server) { +func (*testServiceRegistry) RegisterGRPCService(g *grpc.Server) { grpc_testing.RegisterTestServiceServer(g, &grpc_testing.UnimplementedTestServiceServer{}) } -func (t *testServiceRegistry) RegisterRESTHandler(userDefineHandlers map[string]http.Handler) { +func (*testServiceRegistry) RegisterRESTHandler(userDefineHandlers map[string]http.Handler) { group := apiutil.APIServiceGroup{ Name: "my-http-service", Version: "v1alpha1", IsCore: false, PathPrefix: "/my-service", } - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Write([]byte("Hello World!")) }) diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index f9ad908c5ad..b24c7290a73 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -51,7 +51,7 @@ func TestReconnect(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.TickInterval = typeutil.Duration{Duration: 50 * time.Millisecond} conf.ElectionInterval = typeutil.Duration{Duration: 250 * time.Millisecond} }) @@ -577,7 +577,7 @@ func (suite *redirectorTestSuite) SetupSuite() { re := suite.Require() ctx, cancel := context.WithCancel(context.Background()) suite.cleanup = cancel - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.TickInterval = typeutil.Duration{Duration: 50 * time.Millisecond} conf.ElectionInterval = typeutil.Duration{Duration: 250 * time.Millisecond} }) @@ -703,7 +703,7 @@ func TestRemovingProgress(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + cluster, 
err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) re.NoError(err) @@ -857,7 +857,7 @@ func TestSendApiWhenRestartRaftCluster(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) re.NoError(err) @@ -899,7 +899,7 @@ func TestPreparingProgress(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) re.NoError(err) diff --git a/tests/server/api/checker_test.go b/tests/server/api/checker_test.go index 198cfca216f..0304d7fd369 100644 --- a/tests/server/api/checker_test.go +++ b/tests/server/api/checker_test.go @@ -49,7 +49,7 @@ func (suite *checkerTestSuite) TestAPI() { func (suite *checkerTestSuite) checkAPI(cluster *tests.TestCluster) { re := suite.Require() - suite.testErrCases(re, cluster) + testErrCases(re, cluster) testCases := []struct { name string @@ -62,12 +62,12 @@ func (suite *checkerTestSuite) checkAPI(cluster *tests.TestCluster) { {name: "joint-state"}, } for _, testCase := range testCases { - suite.testGetStatus(re, cluster, testCase.name) - suite.testPauseOrResume(re, cluster, testCase.name) + testGetStatus(re, cluster, testCase.name) + testPauseOrResume(re, cluster, testCase.name) } } -func (suite *checkerTestSuite) testErrCases(re *require.Assertions, cluster *tests.TestCluster) { +func testErrCases(re *require.Assertions, cluster *tests.TestCluster) { urlPrefix := fmt.Sprintf("%s/pd/api/v1/checker", cluster.GetLeaderServer().GetAddr()) // missing args input := make(map[string]any) @@ -97,7 +97,7 @@ func (suite *checkerTestSuite) testErrCases(re *require.Assertions, cluster *tes re.NoError(err) } -func (suite *checkerTestSuite) testGetStatus(re *require.Assertions, cluster *tests.TestCluster, name string) { +func testGetStatus(re *require.Assertions, cluster *tests.TestCluster, name string) { input := make(map[string]any) urlPrefix := fmt.Sprintf("%s/pd/api/v1/checker", cluster.GetLeaderServer().GetAddr()) // normal run @@ -128,7 +128,7 @@ func (suite *checkerTestSuite) testGetStatus(re *require.Assertions, cluster *te re.False(resp["paused"].(bool)) } -func (suite *checkerTestSuite) testPauseOrResume(re *require.Assertions, cluster *tests.TestCluster, name string) { +func testPauseOrResume(re *require.Assertions, cluster *tests.TestCluster, name string) { input := make(map[string]any) urlPrefix := fmt.Sprintf("%s/pd/api/v1/checker", cluster.GetLeaderServer().GetAddr()) resp := make(map[string]any) diff --git a/tests/server/api/operator_test.go b/tests/server/api/operator_test.go index 32ca4ea300d..a5cd865b454 100644 --- a/tests/server/api/operator_test.go +++ b/tests/server/api/operator_test.go @@ -26,7 +26,6 @@ import ( "time" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/operator" @@ -56,7 +55,7 @@ func TestOperatorTestSuite(t *testing.T) { func (suite *operatorTestSuite) 
SetupSuite() { suite.env = tests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) } @@ -71,7 +70,7 @@ func (suite *operatorTestSuite) TestAddRemovePeer() { func (suite *operatorTestSuite) checkAddRemovePeer(cluster *tests.TestCluster) { re := suite.Require() - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) stores := []*metapb.Store{ { Id: 1, @@ -206,7 +205,7 @@ func (suite *operatorTestSuite) checkMergeRegionOperator(cluster *tests.TestClus tests.MustPutStore(re, cluster, store) } - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetWrittenBytes(1000), core.SetReadBytes(1000), core.SetRegionConfVer(1), core.SetRegionVersion(1)) tests.MustPutRegionInfo(re, cluster, r1) r2 := core.NewTestRegionInfo(20, 1, []byte("b"), []byte("c"), core.SetWrittenBytes(2000), core.SetReadBytes(0), core.SetRegionConfVer(2), core.SetRegionVersion(3)) @@ -233,7 +232,7 @@ func (suite *operatorTestSuite) checkMergeRegionOperator(cluster *tests.TestClus func (suite *operatorTestSuite) TestTransferRegionWithPlacementRule() { // use a new environment to avoid affecting other tests env := tests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 3 }) env.RunTestInTwoModes(suite.checkTransferRegionWithPlacementRule) @@ -242,7 +241,7 @@ func (suite *operatorTestSuite) TestTransferRegionWithPlacementRule() { func (suite *operatorTestSuite) checkTransferRegionWithPlacementRule(cluster *tests.TestCluster) { re := suite.Require() - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) stores := []*metapb.Store{ { Id: 1, @@ -513,7 +512,7 @@ func (suite *operatorTestSuite) checkTransferRegionWithPlacementRule(cluster *te func (suite *operatorTestSuite) TestGetOperatorsAsObject() { // use a new environment to avoid being affected by other tests env := tests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) env.RunTestInTwoModes(suite.checkGetOperatorsAsObject) @@ -522,7 +521,7 @@ func (suite *operatorTestSuite) TestGetOperatorsAsObject() { func (suite *operatorTestSuite) checkGetOperatorsAsObject(cluster *tests.TestCluster) { re := suite.Require() - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) stores := []*metapb.Store{ { Id: 1, @@ -612,19 +611,6 @@ func (suite *operatorTestSuite) checkGetOperatorsAsObject(cluster *tests.TestClu re.Equal("admin-add-peer", resp[2].Desc) } -// pauseRuleChecker will pause rule checker to avoid unexpected operator. 
-func (suite *operatorTestSuite) pauseRuleChecker(re *require.Assertions, cluster *tests.TestCluster) { - checkerName := "rule" - addr := cluster.GetLeaderServer().GetAddr() - resp := make(map[string]any) - url := fmt.Sprintf("%s/pd/api/v1/checker/%s", addr, checkerName) - err := tu.CheckPostJSON(testDialClient, url, []byte(`{"delay":1000}`), tu.StatusOK(re)) - re.NoError(err) - err = tu.ReadGetJSON(re, testDialClient, url, &resp) - re.NoError(err) - re.True(resp["paused"].(bool)) -} - func (suite *operatorTestSuite) TestRemoveOperators() { suite.env.RunTestInTwoModes(suite.checkRemoveOperators) } @@ -656,7 +642,7 @@ func (suite *operatorTestSuite) checkRemoveOperators(cluster *tests.TestCluster) tests.MustPutStore(re, cluster, store) } - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetWrittenBytes(1000), core.SetReadBytes(1000), core.SetRegionConfVer(1), core.SetRegionVersion(1)) tests.MustPutRegionInfo(re, cluster, r1) r2 := core.NewTestRegionInfo(20, 1, []byte("b"), []byte("c"), core.SetWrittenBytes(2000), core.SetReadBytes(0), core.SetRegionConfVer(2), core.SetRegionVersion(3)) diff --git a/tests/server/api/region_test.go b/tests/server/api/region_test.go index 8c286dc12e2..b233ce94a99 100644 --- a/tests/server/api/region_test.go +++ b/tests/server/api/region_test.go @@ -114,14 +114,14 @@ func (suite *regionTestSuite) checkSplitRegions(cluster *tests.TestCluster) { r1 := core.NewTestRegionInfo(601, 13, []byte("aaa"), []byte("ggg")) r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 5, StoreId: 14}, &metapb.Peer{Id: 6, StoreId: 15}) tests.MustPutRegionInfo(re, cluster, r1) - suite.checkRegionCount(re, cluster, 1) + checkRegionCount(re, cluster, 1) newRegionID := uint64(11) body := fmt.Sprintf(`{"retry_limit":%v, "split_keys": ["%s","%s","%s"]}`, 3, hex.EncodeToString([]byte("bbb")), hex.EncodeToString([]byte("ccc")), hex.EncodeToString([]byte("ddd"))) - checkOpt := func(res []byte, code int, _ http.Header) { + checkOpt := func(res []byte, _ int, _ http.Header) { s := &struct { ProcessedPercentage int `json:"processed-percentage"` NewRegionsID []uint64 `json:"regions-id"` @@ -159,7 +159,7 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRange(cluster *tes r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 100 + i, StoreId: (i + 1) % regionCount}, &metapb.Peer{Id: 200 + i, StoreId: (i + 2) % regionCount}) tests.MustPutRegionInfo(re, cluster, r1) } - suite.checkRegionCount(re, cluster, regionCount) + checkRegionCount(re, cluster, regionCount) body := fmt.Sprintf(`{"start_key":"%s", "end_key": "%s"}`, hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3"))) err := tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/regions/accelerate-schedule", urlPrefix), []byte(body), @@ -194,7 +194,7 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRanges(cluster *te r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 100 + i, StoreId: (i + 1) % regionCount}, &metapb.Peer{Id: 200 + i, StoreId: (i + 2) % regionCount}) tests.MustPutRegionInfo(re, cluster, r1) } - suite.checkRegionCount(re, cluster, regionCount) + checkRegionCount(re, cluster, regionCount) body := fmt.Sprintf(`[{"start_key":"%s", "end_key": "%s"}, {"start_key":"%s", "end_key": "%s"}]`, hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3")), hex.EncodeToString([]byte("a4")), hex.EncodeToString([]byte("a6"))) @@ -236,7 +236,7 @@ func (suite *regionTestSuite) 
checkScatterRegions(cluster *tests.TestCluster) { tests.MustPutRegionInfo(re, cluster, r1) tests.MustPutRegionInfo(re, cluster, r2) tests.MustPutRegionInfo(re, cluster, r3) - suite.checkRegionCount(re, cluster, 3) + checkRegionCount(re, cluster, 3) body := fmt.Sprintf(`{"start_key":"%s", "end_key": "%s"}`, hex.EncodeToString([]byte("b1")), hex.EncodeToString([]byte("b3"))) err := tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/regions/scatter", urlPrefix), []byte(body), tu.StatusOK(re)) @@ -263,7 +263,7 @@ func (suite *regionTestSuite) TestCheckRegionsReplicated() { func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) { re := suite.Require() - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) leader := cluster.GetLeaderServer() urlPrefix := leader.GetAddr() + "/pd/api/v1" @@ -276,7 +276,7 @@ func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) tests.MustPutStore(re, cluster, s1) r1 := core.NewTestRegionInfo(2, 1, []byte("a"), []byte("b")) tests.MustPutRegionInfo(re, cluster, r1) - suite.checkRegionCount(re, cluster, 1) + checkRegionCount(re, cluster, 1) // set the bundle bundle := []placement.GroupBundle{ @@ -404,7 +404,7 @@ func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) }) } -func (suite *regionTestSuite) checkRegionCount(re *require.Assertions, cluster *tests.TestCluster, count uint64) { +func checkRegionCount(re *require.Assertions, cluster *tests.TestCluster, count uint64) { leader := cluster.GetLeaderServer() tu.Eventually(re, func() bool { return leader.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count == int(count) @@ -417,7 +417,7 @@ func (suite *regionTestSuite) checkRegionCount(re *require.Assertions, cluster * } // pauseRuleChecker will pause rule checker to avoid unexpected operator. 
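The `checkRegionCount` and `pauseRuleChecker` rewrites around this point are the receiver-dropping half of that refactor: the methods never touched their suite, so they become plain functions that every suite in the package can share. A simplified sketch of the resulting helper shape; the types and the bare polling loop are stand-ins for pd's test cluster and `testutil.Eventually`:

```go
package demo

import "time"

type cluster struct{ regions int }

func (c *cluster) regionCount() int { return c.regions }

// checkRegionCount as rewritten in the diff is a package-level helper that
// polls until the leader reports the expected region count. The deadline
// loop here replaces pd's Eventually assertion for illustration only.
func checkRegionCount(c *cluster, want int, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if c.regionCount() == want {
			return true
		}
		time.Sleep(10 * time.Millisecond)
	}
	return false
}
```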
-func (suite *regionTestSuite) pauseRuleChecker(re *require.Assertions, cluster *tests.TestCluster) { +func pauseRuleChecker(re *require.Assertions, cluster *tests.TestCluster) { checkerName := "rule" addr := cluster.GetLeaderServer().GetAddr() resp := make(map[string]any) diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go index 83ab0f1cebb..4f60b5cfb28 100644 --- a/tests/server/api/rule_test.go +++ b/tests/server/api/rule_test.go @@ -49,7 +49,7 @@ func TestRuleTestSuite(t *testing.T) { } func (suite *ruleTestSuite) SetupSuite() { - suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { + suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, _ string) { conf.PDServerCfg.KeyType = "raw" conf.Replication.EnablePlacementRules = true }) @@ -235,7 +235,7 @@ func (suite *ruleTestSuite) checkGet(cluster *tests.TestCluster) { if testCase.found { tu.Eventually(re, func() bool { err = tu.ReadGetJSON(re, testDialClient, url, &resp) - return suite.compareRule(&resp, &testCase.rule) + return compareRule(&resp, &testCase.rule) }) } else { err = tu.CheckGetJSON(testDialClient, url, nil, tu.Status(re, testCase.code)) @@ -432,7 +432,7 @@ func (suite *ruleTestSuite) checkGetAllByGroup(cluster *tests.TestCluster) { return false } if testCase.count == 2 { - return suite.compareRule(resp[0], &rule) && suite.compareRule(resp[1], &rule1) + return compareRule(resp[0], &rule) && compareRule(resp[1], &rule1) } return true }) @@ -492,7 +492,7 @@ func (suite *ruleTestSuite) checkGetAllByRegion(cluster *tests.TestCluster) { err = tu.ReadGetJSON(re, testDialClient, url, &resp) for _, r := range resp { if r.GroupID == "e" { - return suite.compareRule(r, &rule) + return compareRule(r, &rule) } } return true @@ -780,7 +780,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { }, }, } - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1}, 1) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1}, 1) // Set b2 := placement.GroupBundle{ @@ -797,17 +797,17 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { re.NoError(err) // Get - suite.assertBundleEqual(re, urlPrefix+"/placement-rule/foo", b2) + assertBundleEqual(re, urlPrefix+"/placement-rule/foo", b2) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b2}, 2) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b2}, 2) // Delete err = tu.CheckDelete(testDialClient, urlPrefix+"/placement-rule/pd", tu.StatusOK(re)) re.NoError(err) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b2}, 1) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b2}, 1) // SetAll b2.Rules = append(b2.Rules, &placement.Rule{GroupID: "foo", ID: "baz", Index: 2, Role: placement.Follower, Count: 1}) @@ -819,14 +819,14 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { re.NoError(err) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b2, b3}, 3) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b2, b3}, 3) // Delete using regexp err = tu.CheckDelete(testDialClient, urlPrefix+"/placement-rule/"+url.PathEscape("foo.*")+"?regexp", tu.StatusOK(re)) re.NoError(err) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", 
[]placement.GroupBundle{b1}, 1) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1}, 1) // Set id := "rule-without-group-id" @@ -844,10 +844,10 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { b4.ID = id b4.Rules[0].GroupID = b4.ID // Get - suite.assertBundleEqual(re, urlPrefix+"/placement-rule/"+id, b4) + assertBundleEqual(re, urlPrefix+"/placement-rule/"+id, b4) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b4}, 2) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b4}, 2) // SetAll b5 := placement.GroupBundle{ @@ -865,7 +865,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { b5.Rules[0].GroupID = b5.ID // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b4, b5}, 3) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b4, b5}, 3) } func (suite *ruleTestSuite) TestBundleBadRequest() { @@ -1194,18 +1194,18 @@ func (suite *ruleTestSuite) checkLargeRules(cluster *tests.TestCluster) { suite.postAndCheckRuleBundle(urlPrefix, genBundlesWithRulesNum(etcdutil.MaxEtcdTxnOps*2)) } -func (suite *ruleTestSuite) assertBundleEqual(re *require.Assertions, url string, expectedBundle placement.GroupBundle) { +func assertBundleEqual(re *require.Assertions, url string, expectedBundle placement.GroupBundle) { var bundle placement.GroupBundle tu.Eventually(re, func() bool { err := tu.ReadGetJSON(re, testDialClient, url, &bundle) if err != nil { return false } - return suite.compareBundle(bundle, expectedBundle) + return compareBundle(bundle, expectedBundle) }) } -func (suite *ruleTestSuite) assertBundlesEqual(re *require.Assertions, url string, expectedBundles []placement.GroupBundle, expectedLen int) { +func assertBundlesEqual(re *require.Assertions, url string, expectedBundles []placement.GroupBundle, expectedLen int) { var bundles []placement.GroupBundle tu.Eventually(re, func() bool { err := tu.ReadGetJSON(re, testDialClient, url, &bundles) @@ -1218,7 +1218,7 @@ func (suite *ruleTestSuite) assertBundlesEqual(re *require.Assertions, url strin sort.Slice(bundles, func(i, j int) bool { return bundles[i].ID < bundles[j].ID }) sort.Slice(expectedBundles, func(i, j int) bool { return expectedBundles[i].ID < expectedBundles[j].ID }) for i := range bundles { - if !suite.compareBundle(bundles[i], expectedBundles[i]) { + if !compareBundle(bundles[i], expectedBundles[i]) { return false } } @@ -1226,21 +1226,21 @@ func (suite *ruleTestSuite) assertBundlesEqual(re *require.Assertions, url strin }) } -func (suite *ruleTestSuite) compareBundle(b1, b2 placement.GroupBundle) bool { +func compareBundle(b1, b2 placement.GroupBundle) bool { if b2.ID != b1.ID || b2.Index != b1.Index || b2.Override != b1.Override || len(b2.Rules) != len(b1.Rules) { return false } sort.Slice(b1.Rules, func(i, j int) bool { return b1.Rules[i].ID < b1.Rules[j].ID }) sort.Slice(b2.Rules, func(i, j int) bool { return b2.Rules[i].ID < b2.Rules[j].ID }) for i := range b1.Rules { - if !suite.compareRule(b1.Rules[i], b2.Rules[i]) { + if !compareRule(b1.Rules[i], b2.Rules[i]) { return false } } return true } -func (suite *ruleTestSuite) compareRule(r1 *placement.Rule, r2 *placement.Rule) bool { +func compareRule(r1 *placement.Rule, r2 *placement.Rule) bool { return r2.GroupID == r1.GroupID && r2.ID == r1.ID && r2.StartKeyHex == r1.StartKeyHex && @@ -1267,7 +1267,7 @@ func (suite *ruleTestSuite) 
postAndCheckRuleBundle(urlPrefix string, bundle []pl sort.Slice(respBundle, func(i, j int) bool { return respBundle[i].ID < respBundle[j].ID }) sort.Slice(bundle, func(i, j int) bool { return bundle[i].ID < bundle[j].ID }) for i := range respBundle { - if !suite.compareBundle(respBundle[i], bundle[i]) { + if !compareBundle(respBundle[i], bundle[i]) { return false } } @@ -1285,7 +1285,7 @@ func TestRegionRuleTestSuite(t *testing.T) { } func (suite *regionRuleTestSuite) SetupSuite() { - suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { + suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, _ string) { conf.Replication.EnablePlacementRules = true conf.Replication.MaxReplicas = 1 }) @@ -1396,14 +1396,14 @@ func (suite *regionRuleTestSuite) checkRegionPlacementRule(cluster *tests.TestCl re.Equal("keyspaces/0", labels[0].ID) u = fmt.Sprintf("%s/config/region-label/rules/ids", urlPrefix) - err = tu.CheckGetJSON(testDialClient, u, []byte(`["rule1", "rule3"]`), func(resp []byte, statusCode int, _ http.Header) { + err = tu.CheckGetJSON(testDialClient, u, []byte(`["rule1", "rule3"]`), func(resp []byte, _ int, _ http.Header) { err := json.Unmarshal(resp, &labels) re.NoError(err) re.Empty(labels) }) re.NoError(err) - err = tu.CheckGetJSON(testDialClient, u, []byte(`["keyspaces/0"]`), func(resp []byte, statusCode int, _ http.Header) { + err = tu.CheckGetJSON(testDialClient, u, []byte(`["keyspaces/0"]`), func(resp []byte, _ int, _ http.Header) { err := json.Unmarshal(resp, &labels) re.NoError(err) re.Len(labels, 1) diff --git a/tests/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go index 2329077209d..4f71315803a 100644 --- a/tests/server/api/scheduler_test.go +++ b/tests/server/api/scheduler_test.go @@ -123,7 +123,7 @@ func (suite *scheduleTestSuite) checkOriginAPI(cluster *tests.TestCluster) { re.NoError(failpoint.Disable("github.com/tikv/pd/server/config/persistFail")) err = tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re)) re.NoError(err) - suite.assertNoScheduler(re, urlPrefix, "evict-leader-scheduler") + assertNoScheduler(re, urlPrefix, "evict-leader-scheduler") re.NoError(tu.CheckGetJSON(testDialClient, listURL, nil, tu.Status(re, http.StatusNotFound))) err = tu.CheckDelete(testDialClient, deleteURL, tu.Status(re, http.StatusNotFound)) re.NoError(err) @@ -531,8 +531,8 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if testCase.extraTestFunc != nil { testCase.extraTestFunc(testCase.createdName) } - suite.deleteScheduler(re, urlPrefix, testCase.createdName) - suite.assertNoScheduler(re, urlPrefix, testCase.createdName) + deleteScheduler(re, urlPrefix, testCase.createdName) + assertNoScheduler(re, urlPrefix, testCase.createdName) } // test pause and resume all schedulers. @@ -546,7 +546,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { } body, err := json.Marshal(input) re.NoError(err) - suite.addScheduler(re, urlPrefix, body) + addScheduler(re, urlPrefix, body) suite.assertSchedulerExists(urlPrefix, testCase.createdName) // wait for scheduler to be synced. 
if testCase.extraTestFunc != nil { testCase.extraTestFunc(testCase.createdName) @@ -566,7 +566,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if createdName == "" { createdName = testCase.name } - isPaused := suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused := isSchedulerPaused(re, urlPrefix, createdName) re.True(isPaused) } input["delay"] = 1 @@ -580,7 +580,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if createdName == "" { createdName = testCase.name } - isPaused := suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused := isSchedulerPaused(re, urlPrefix, createdName) re.False(isPaused) } @@ -600,7 +600,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if createdName == "" { createdName = testCase.name } - isPaused := suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused := isSchedulerPaused(re, urlPrefix, createdName) re.False(isPaused) } @@ -610,8 +610,8 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if createdName == "" { createdName = testCase.name } - suite.deleteScheduler(re, urlPrefix, createdName) - suite.assertNoScheduler(re, urlPrefix, createdName) + deleteScheduler(re, urlPrefix, createdName) + assertNoScheduler(re, urlPrefix, createdName) } } @@ -638,7 +638,7 @@ func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) { input["name"] = name body, err := json.Marshal(input) re.NoError(err) - suite.addScheduler(re, urlPrefix, body) + addScheduler(re, urlPrefix, body) u := fmt.Sprintf("%s%s/api/v1/config/schedule", leaderAddr, apiPrefix) var scheduleConfig sc.ScheduleConfig @@ -652,7 +652,7 @@ func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) { err = tu.CheckPostJSON(testDialClient, u, body, tu.StatusOK(re)) re.NoError(err) - suite.assertNoScheduler(re, urlPrefix, name) + assertNoScheduler(re, urlPrefix, name) suite.assertSchedulerExists(fmt.Sprintf("%s?status=disabled", urlPrefix), name) // reset schedule config @@ -662,16 +662,16 @@ func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) { err = tu.CheckPostJSON(testDialClient, u, body, tu.StatusOK(re)) re.NoError(err) - suite.deleteScheduler(re, urlPrefix, name) - suite.assertNoScheduler(re, urlPrefix, name) + deleteScheduler(re, urlPrefix, name) + assertNoScheduler(re, urlPrefix, name) } -func (suite *scheduleTestSuite) addScheduler(re *require.Assertions, urlPrefix string, body []byte) { +func addScheduler(re *require.Assertions, urlPrefix string, body []byte) { err := tu.CheckPostJSON(testDialClient, urlPrefix, body, tu.StatusOK(re)) re.NoError(err) } -func (suite *scheduleTestSuite) deleteScheduler(re *require.Assertions, urlPrefix string, createdName string) { +func deleteScheduler(re *require.Assertions, urlPrefix string, createdName string) { deleteURL := fmt.Sprintf("%s/%s", urlPrefix, createdName) err := tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re)) re.NoError(err) @@ -696,7 +696,7 @@ func (suite *scheduleTestSuite) testPauseOrResume(re *require.Assertions, urlPre re.NoError(err) err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re)) re.NoError(err) - isPaused := suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused := isSchedulerPaused(re, urlPrefix, createdName) re.True(isPaused) input["delay"] = 1 pauseArgs, err = json.Marshal(input) @@ -704,7 +704,7 @@ func (suite *scheduleTestSuite) testPauseOrResume(re *require.Assertions, urlPre err = 
tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re)) re.NoError(err) time.Sleep(time.Second * 2) - isPaused = suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused = isSchedulerPaused(re, urlPrefix, createdName) re.False(isPaused) // test resume. @@ -719,7 +719,7 @@ func (suite *scheduleTestSuite) testPauseOrResume(re *require.Assertions, urlPre re.NoError(err) err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re)) re.NoError(err) - isPaused = suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused = isSchedulerPaused(re, urlPrefix, createdName) re.False(isPaused) } @@ -749,9 +749,9 @@ func (suite *scheduleTestSuite) checkEmptySchedulers(cluster *tests.TestCluster) input["name"] = scheduler body, err := json.Marshal(input) re.NoError(err) - suite.addScheduler(re, urlPrefix, body) + addScheduler(re, urlPrefix, body) } else { - suite.deleteScheduler(re, urlPrefix, scheduler) + deleteScheduler(re, urlPrefix, scheduler) } } tu.Eventually(re, func() bool { @@ -777,7 +777,7 @@ func (suite *scheduleTestSuite) assertSchedulerExists(urlPrefix string, schedule }) } -func (suite *scheduleTestSuite) assertNoScheduler(re *require.Assertions, urlPrefix string, scheduler string) { +func assertNoScheduler(re *require.Assertions, urlPrefix string, scheduler string) { var schedulers []string tu.Eventually(re, func() bool { err := tu.ReadGetJSON(re, testDialClient, urlPrefix, &schedulers, @@ -787,7 +787,7 @@ func (suite *scheduleTestSuite) assertNoScheduler(re *require.Assertions, urlPre }) } -func (suite *scheduleTestSuite) isSchedulerPaused(re *require.Assertions, urlPrefix, name string) bool { +func isSchedulerPaused(re *require.Assertions, urlPrefix, name string) bool { var schedulers []string err := tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s?status=paused", urlPrefix), &schedulers, tu.StatusOK(re)) diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index 3415c22a77b..aea5ff73968 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -824,7 +824,7 @@ func TestSetScheduleOpt(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // TODO: enable placementrules - tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, svr string) { cfg.Replication.EnablePlacementRules = false }) + tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Replication.EnablePlacementRules = false }) defer tc.Destroy() re.NoError(err) @@ -985,7 +985,7 @@ func TestTiFlashWithPlacementRules(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, name string) { cfg.Replication.EnablePlacementRules = false }) + tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Replication.EnablePlacementRules = false }) defer tc.Destroy() re.NoError(err) err = tc.RunInitialServers() @@ -1035,7 +1035,7 @@ func TestReplicationModeStatus(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tc, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.ReplicationMode.ReplicationMode = "dr-auto-sync" }) diff --git a/tests/server/config/config_test.go b/tests/server/config/config_test.go index 
108bc5fc753..b6fcecbd47b 100644 --- a/tests/server/config/config_test.go +++ b/tests/server/config/config_test.go @@ -451,7 +451,7 @@ type ttlConfigInterface interface { IsTikvRegionSplitEnabled() bool } -func (suite *configTestSuite) assertTTLConfig( +func assertTTLConfig( re *require.Assertions, cluster *tests.TestCluster, expectedEqual bool, @@ -488,7 +488,7 @@ func (suite *configTestSuite) assertTTLConfig( } } -func (suite *configTestSuite) assertTTLConfigItemEqual( +func assertTTLConfigItemEqual( re *require.Assertions, cluster *tests.TestCluster, item string, @@ -532,22 +532,22 @@ func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) { // test no config and cleaning up err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 0), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, false) + assertTTLConfig(re, cluster, false) // test time goes by err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 5), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, true) + assertTTLConfig(re, cluster, true) time.Sleep(5 * time.Second) - suite.assertTTLConfig(re, cluster, false) + assertTTLConfig(re, cluster, false) // test cleaning up err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 5), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, true) + assertTTLConfig(re, cluster, true) err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 0), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, false) + assertTTLConfig(re, cluster, false) postData, err = json.Marshal(invalidTTLConfig) re.NoError(err) @@ -564,9 +564,9 @@ func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) { err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 1), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfigItemEqual(re, cluster, "max-merge-region-size", uint64(999)) + assertTTLConfigItemEqual(re, cluster, "max-merge-region-size", uint64(999)) // max-merge-region-keys should keep consistence with max-merge-region-size. - suite.assertTTLConfigItemEqual(re, cluster, "max-merge-region-keys", uint64(999*10000)) + assertTTLConfigItemEqual(re, cluster, "max-merge-region-keys", uint64(999*10000)) // on invalid value, we use default config mergeConfig = map[string]any{ @@ -576,7 +576,7 @@ func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) { re.NoError(err) err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 10), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfigItemEqual(re, cluster, "enable-tikv-split-region", true) + assertTTLConfigItemEqual(re, cluster, "enable-tikv-split-region", true) } func (suite *configTestSuite) TestTTLConflict() { @@ -592,7 +592,7 @@ func (suite *configTestSuite) checkTTLConflict(cluster *tests.TestCluster) { re.NoError(err) err = tu.CheckPostJSON(testDialClient, addr, postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, true) + assertTTLConfig(re, cluster, true) cfg := map[string]any{"max-snapshot-count": 30} postData, err = json.Marshal(cfg) diff --git a/tests/server/join/join_test.go b/tests/server/join/join_test.go index 5cdcbc090b8..32e66c27589 100644 --- a/tests/server/join/join_test.go +++ b/tests/server/join/join_test.go @@ -105,7 +105,7 @@ func TestFailedAndDeletedPDJoinsPreviousCluster(t *testing.T) { re.NoError(err) // The server should not successfully start. 
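The TTL config tests above exercise a revert-after-expiry behavior: a setting posted through `createTTLUrl` with a TTL in seconds takes effect immediately and falls back to the persisted value once the TTL lapses (hence the `Sleep(5 * time.Second)` between the two assertions). The sketch below models just that semantics with an illustrative `ttlValue` type; it is not pd's actual TTL-config storage:

```go
package demo

import (
	"sync"
	"time"
)

// ttlValue mimics what checkConfigTTL asserts: an override applied with a
// TTL wins while the TTL is live, then reads revert to the persistent value.
type ttlValue struct {
	mu         sync.Mutex
	persistent int
	override   int
	expiresAt  time.Time
}

func (v *ttlValue) SetWithTTL(val int, ttl time.Duration) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.override, v.expiresAt = val, time.Now().Add(ttl)
}

func (v *ttlValue) Get() int {
	v.mu.Lock()
	defer v.mu.Unlock()
	if time.Now().Before(v.expiresAt) {
		return v.override
	}
	return v.persistent
}
```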
- res := cluster.RunServer(pd3) + res := tests.RunServer(pd3) re.Error(<-res) members, err := etcdutil.ListEtcdMembers(ctx, client) @@ -138,7 +138,7 @@ func TestDeletedPDJoinsPreviousCluster(t *testing.T) { re.NoError(err) // The server should not successfully start. - res := cluster.RunServer(pd3) + res := tests.RunServer(pd3) re.Error(<-res) members, err := etcdutil.ListEtcdMembers(ctx, client) diff --git a/tests/server/keyspace/keyspace_test.go b/tests/server/keyspace/keyspace_test.go index aa2e89296bb..d6e188359ce 100644 --- a/tests/server/keyspace/keyspace_test.go +++ b/tests/server/keyspace/keyspace_test.go @@ -53,7 +53,7 @@ func (suite *keyspaceTestSuite) SetupTest() { re := suite.Require() ctx, cancel := context.WithCancel(context.Background()) suite.cancel = cancel - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = preAllocKeyspace }) suite.cluster = cluster diff --git a/tests/server/member/member_test.go b/tests/server/member/member_test.go index 7aadc2772e8..92ed11a75ce 100644 --- a/tests/server/member/member_test.go +++ b/tests/server/member/member_test.go @@ -152,7 +152,7 @@ func TestLeaderPriority(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.LeaderPriorityCheckInterval = typeutil.NewDuration(time.Second) }) defer cluster.Destroy() diff --git a/tests/server/region_syncer/region_syncer_test.go b/tests/server/region_syncer/region_syncer_test.go index 1470173e0ed..f82346571ef 100644 --- a/tests/server/region_syncer/region_syncer_test.go +++ b/tests/server/region_syncer/region_syncer_test.go @@ -40,7 +40,7 @@ func TestRegionSyncer(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/syncer/noFastExitSync", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/syncer/disableClientStreaming", `return(true)`)) - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.PDServerCfg.UseRegionStorage = true }) defer func() { cluster.Destroy() cancel() @@ -163,7 +163,7 @@ func TestFullSyncWithAddMember(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.PDServerCfg.UseRegionStorage = true }) defer cluster.Destroy() re.NoError(err) @@ -207,7 +207,7 @@ func TestPrepareChecker(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker", `return(true)`)) - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.PDServerCfg.UseRegionStorage = true }) defer cluster.Destroy() re.NoError(err) @@ -256,7 +256,7 @@ func 
TestPrepareCheckerWithTransferLeader(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker", `return(true)`)) - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.PDServerCfg.UseRegionStorage = true }) defer cluster.Destroy() re.NoError(err) diff --git a/tests/server/server_test.go b/tests/server/server_test.go index 3b85cd3cf0d..adf7202454b 100644 --- a/tests/server/server_test.go +++ b/tests/server/server_test.go @@ -98,7 +98,7 @@ func TestClusterID(t *testing.T) { re.Equal(clusterID, s.GetClusterID()) } - cluster2, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.InitialClusterToken = "foobar" }) + cluster2, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.InitialClusterToken = "foobar" }) defer cluster2.Destroy() re.NoError(err) err = cluster2.RunInitialServers() diff --git a/tests/server/storage/hot_region_storage_test.go b/tests/server/storage/hot_region_storage_test.go index 12110be0249..b63b533bc0f 100644 --- a/tests/server/storage/hot_region_storage_test.go +++ b/tests/server/storage/hot_region_storage_test.go @@ -37,7 +37,7 @@ func TestHotRegionStorage(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, - func(cfg *config.Config, serverName string) { + func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 cfg.Schedule.HotRegionsWriteInterval.Duration = 1000 * time.Millisecond cfg.Schedule.HotRegionsReservedDays = 1 @@ -145,7 +145,7 @@ func TestHotRegionStorageReservedDayConfigChange(t *testing.T) { interval := 100 * time.Millisecond defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, - func(cfg *config.Config, serverName string) { + func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 cfg.Schedule.HotRegionsWriteInterval.Duration = interval cfg.Schedule.HotRegionsReservedDays = 1 @@ -237,7 +237,7 @@ func TestHotRegionStorageWriteIntervalConfigChange(t *testing.T) { interval := 100 * time.Millisecond defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, - func(cfg *config.Config, serverName string) { + func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 cfg.Schedule.HotRegionsWriteInterval.Duration = interval cfg.Schedule.HotRegionsReservedDays = 1 diff --git a/tests/server/tso/allocator_test.go b/tests/server/tso/allocator_test.go index 3bc4d56ac58..692aec490eb 100644 --- a/tests/server/tso/allocator_test.go +++ b/tests/server/tso/allocator_test.go @@ -132,7 +132,7 @@ func TestPriorityAndDifferentLocalTSO(t *testing.T) { time.Sleep(time.Second * 5) // Join a new dc-location - pd4, err := cluster.Join(ctx, func(conf *config.Config, serverName string) { + pd4, err := cluster.Join(ctx, func(conf *config.Config, _ string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = "dc-4" }) diff --git a/tests/server/tso/consistency_test.go b/tests/server/tso/consistency_test.go index d1c45df7f17..1bf20cce20d 100644 --- a/tests/server/tso/consistency_test.go +++ b/tests/server/tso/consistency_test.go @@ -275,7 +275,7 @@ func (suite *tsoConsistencyTestSuite) TestLocalTSOAfterMemberChanged() { 
re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/tso/systemTimeSlow", `return(true)`)) // Join a new dc-location - pd4, err := cluster.Join(suite.ctx, func(conf *config.Config, serverName string) { + pd4, err := cluster.Join(suite.ctx, func(conf *config.Config, _ string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = "dc-4" }) diff --git a/tests/server/tso/global_tso_test.go b/tests/server/tso/global_tso_test.go index 5ae2e6e0f67..f705bdf12b5 100644 --- a/tests/server/tso/global_tso_test.go +++ b/tests/server/tso/global_tso_test.go @@ -137,7 +137,7 @@ func TestLogicalOverflow(t *testing.T) { runCase := func(updateInterval time.Duration) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.TSOUpdatePhysicalInterval = typeutil.Duration{Duration: updateInterval} }) defer cluster.Destroy() diff --git a/tests/server/watch/leader_watch_test.go b/tests/server/watch/leader_watch_test.go index f7765297023..84e16398677 100644 --- a/tests/server/watch/leader_watch_test.go +++ b/tests/server/watch/leader_watch_test.go @@ -35,7 +35,7 @@ func TestWatcher(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.AutoCompactionRetention = "1s" }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.AutoCompactionRetention = "1s" }) defer cluster.Destroy() re.NoError(err) @@ -73,7 +73,7 @@ func TestWatcherCompacted(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.AutoCompactionRetention = "1s" }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.AutoCompactionRetention = "1s" }) defer cluster.Destroy() re.NoError(err) diff --git a/tests/testutil.go b/tests/testutil.go index 106cddc9dfb..5d9905af64c 100644 --- a/tests/testutil.go +++ b/tests/testutil.go @@ -273,14 +273,14 @@ func (s *SchedulingTestEnvironment) RunTestInTwoModes(test func(*TestCluster)) { // RunTestInPDMode is to run test in pd mode. 
func (s *SchedulingTestEnvironment) RunTestInPDMode(test func(*TestCluster)) { - s.t.Logf("start test %s in pd mode", s.getTestName()) + s.t.Logf("start test %s in pd mode", getTestName()) if _, ok := s.clusters[pdMode]; !ok { s.startCluster(pdMode) } test(s.clusters[pdMode]) } -func (s *SchedulingTestEnvironment) getTestName() string { +func getTestName() string { pc, _, _, _ := runtime.Caller(2) caller := runtime.FuncForPC(pc) if caller == nil || strings.Contains(caller.Name(), "RunTestInTwoModes") { @@ -303,7 +303,7 @@ func (s *SchedulingTestEnvironment) RunTestInAPIMode(test func(*TestCluster)) { re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember")) re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) }() - s.t.Logf("start test %s in api mode", s.getTestName()) + s.t.Logf("start test %s in api mode", getTestName()) if _, ok := s.clusters[apiMode]; !ok { s.startCluster(apiMode) } diff --git a/tests/tso_cluster.go b/tests/tso_cluster.go index 4021613df2a..e1fdb6d69ca 100644 --- a/tests/tso_cluster.go +++ b/tests/tso_cluster.go @@ -76,7 +76,7 @@ func RestartTestTSOCluster( defer wg.Done() clean() serverCfg := cluster.servers[addr].GetConfig() - newServer, newCleanup, err := NewTSOTestServer(newCluster.ctx, serverCfg) + newServer, newCleanup, err := NewTSOTestServer(ctx, serverCfg) serverMap.Store(addr, newServer) cleanupMap.Store(addr, newCleanup) errorMap.Store(addr, err) diff --git a/tools.go b/tools.go index 909f42ab9b5..e5298de2827 100644 --- a/tools.go +++ b/tools.go @@ -20,7 +20,6 @@ package tools import ( _ "github.com/AlekSi/gocov-xml" _ "github.com/axw/gocov/gocov" - _ "github.com/mgechev/revive" _ "github.com/pingcap/errors/errdoc-gen" _ "github.com/pingcap/failpoint/failpoint-ctl" _ "github.com/swaggo/swag/cmd/swag" diff --git a/tools/Makefile b/tools/Makefile index 336cc536949..4195160aff6 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -25,8 +25,6 @@ static: install-tools @ gofmt -s -l -d . 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." @ golangci-lint run -c $(ROOT_PATH)/.golangci.yml --verbose ./... --allow-parallel-runners - @ echo "revive ..." - @ revive -formatter friendly -config $(ROOT_PATH)/revive.toml ./... 
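`getTestName` above drops its receiver too, but the detail worth noting is the fixed frame depth: `runtime.Caller(2)` must keep pointing at the test function, which still holds because the helper continues to be invoked directly from `RunTestInPDMode`/`RunTestInAPIMode`. A self-contained sketch of that lookup (the trimming is simplified relative to pd's version):

```go
package demo

import (
	"runtime"
	"strings"
)

// callerName mirrors the getTestName shape: skip two frames (this helper
// and the Run* wrapper that invoked it) to land on the test function.
func callerName() string {
	pc, _, _, ok := runtime.Caller(2)
	if !ok {
		return "unknown"
	}
	fn := runtime.FuncForPC(pc)
	if fn == nil {
		return "unknown"
	}
	name := fn.Name() // fully qualified, e.g. "pkg/tests.TestFoo"
	return name[strings.LastIndexByte(name, '.')+1:]
}
```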
tidy: @ go mod tidy diff --git a/tools/go.mod b/tools/go.mod index 8287f834471..9d8728f7034 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -31,6 +31,7 @@ require ( github.com/tikv/pd v0.0.0-00010101000000-000000000000 github.com/tikv/pd/client v0.0.0-00010101000000-000000000000 go.etcd.io/etcd v0.5.0-alpha.5.0.20240320135013-950cd5fbe6ca + go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 golang.org/x/text v0.14.0 diff --git a/tools/go.sum b/tools/go.sum index ac6cc75903e..d7c7a4801b1 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -385,6 +385,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -514,6 +516,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/dig v1.9.0 h1:pJTDXKEhRqBI8W7rU7kwT5EgyRZuSMVSFcZolOvKK9U= go.uber.org/dig v1.9.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.12.0 h1:+1+3Cz9M0dFMPy9SW9XUIUHye8bnPUm7q7DroNGWYG4= diff --git a/tools/pd-analysis/analysis/parse_log.go b/tools/pd-analysis/analysis/parse_log.go index 44ae617284f..f096e3fe380 100644 --- a/tools/pd-analysis/analysis/parse_log.go +++ b/tools/pd-analysis/analysis/parse_log.go @@ -42,7 +42,7 @@ type Interpreter interface { } // CompileRegex is to provide regexp for transfer counter. 
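The `tools/go.mod` change above pulls in `go.uber.org/automaxprocs`, presumably for the new `pd-ut` test runner: the library's `init` caps `GOMAXPROCS` at the container's cgroup CPU quota, which keeps test parallelism honest under CI limits. The usual wiring is just a blank import; the actual pd-ut hookup is not shown in these hunks, so this is a sketch of the conventional usage:

```go
package main

import (
	"fmt"
	"runtime"

	// The package's init() reads the cgroup CPU quota and adjusts
	// GOMAXPROCS accordingly before main runs.
	_ "go.uber.org/automaxprocs"
)

func main() {
	fmt.Println("GOMAXPROCS =", runtime.GOMAXPROCS(0)) // 0 queries without changing it
}
```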
-func (c *TransferCounter) CompileRegex(operator string) (*regexp.Regexp, error) { +func (*TransferCounter) CompileRegex(operator string) (*regexp.Regexp, error) { var r *regexp.Regexp var err error @@ -64,7 +64,7 @@ func (c *TransferCounter) CompileRegex(operator string) (*regexp.Regexp, error) return r, err } -func (c *TransferCounter) parseLine(content string, r *regexp.Regexp) ([]uint64, error) { +func parseLine(content string, r *regexp.Regexp) ([]uint64, error) { results := make([]uint64, 0, 4) subStrings := r.FindStringSubmatch(content) if len(subStrings) == 0 { @@ -78,9 +78,8 @@ func (c *TransferCounter) parseLine(content string, r *regexp.Regexp) ([]uint64, results = append(results, uint64(num)) } return results, nil - } else { - return results, errors.New("Can't parse Log, with " + content) } + return results, errors.New("Can't parse Log, with " + content) } func forEachLine(filename string, solve func(string) error) error { @@ -116,7 +115,7 @@ func forEachLine(filename string, solve func(string) error) error { func isExpectTime(expect, layout string, isBeforeThanExpect bool) func(time.Time) bool { expectTime, err := time.Parse(layout, expect) if err != nil { - return func(current time.Time) bool { + return func(_ time.Time) bool { return true } } @@ -142,14 +141,13 @@ func currentTime(layout string) func(content string) (time.Time, error) { return time.Parse(layout, result[1]) } else if len(result) == 0 { return time.Time{}, nil - } else { - return time.Time{}, errors.New("There is no valid time in log with " + content) } + return time.Time{}, errors.New("There is no valid time in log with " + content) } } // ParseLog is to parse log for transfer counter. -func (c *TransferCounter) ParseLog(filename, start, end, layout string, r *regexp.Regexp) error { +func (*TransferCounter) ParseLog(filename, start, end, layout string, r *regexp.Regexp) error { afterStart := isExpectTime(start, layout, false) beforeEnd := isExpectTime(end, layout, true) getCurrent := currentTime(layout) @@ -161,7 +159,7 @@ func (c *TransferCounter) ParseLog(filename, start, end, layout string, r *regex } // if current line time between start and end if afterStart(current) && beforeEnd(current) { - results, err := c.parseLine(content, r) + results, err := parseLine(content, r) if err != nil { return err } diff --git a/tools/pd-analysis/analysis/parse_log_test.go b/tools/pd-analysis/analysis/parse_log_test.go index ffdcb2137c0..345f70959f8 100644 --- a/tools/pd-analysis/analysis/parse_log_test.go +++ b/tools/pd-analysis/analysis/parse_log_test.go @@ -23,7 +23,7 @@ import ( func transferCounterParseLog(operator, content string, expect []uint64) bool { r, _ := GetTransferCounter().CompileRegex(operator) - results, _ := GetTransferCounter().parseLine(content, r) + results, _ := parseLine(content, r) if len(results) != len(expect) { return false } diff --git a/tools/pd-api-bench/cases/cases.go b/tools/pd-api-bench/cases/cases.go index 473a11d749a..72986df5ed8 100644 --- a/tools/pd-api-bench/cases/cases.go +++ b/tools/pd-api-bench/cases/cases.go @@ -37,6 +37,8 @@ var ( storesID []uint64 ) +const defaultKeyLen = 56 + // InitCluster initializes the cluster. 
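The `parse_log.go` hunks above apply revive's indent-error-flow/superfluous-else fix: when the success branch returns, the `else` wrapper around the error path is dropped and the error return is outdented to the function's fall-through position. The shape, in miniature (generic names, not the pd code):

```go
package demo

import "errors"

// Before:
//   if len(subStrings) > 0 { ...; return results, nil
//   } else { return results, errors.New(...) }
// After: no else — the error path is the natural fall-through.
func parseFields(fields []string) ([]string, error) {
	if len(fields) > 0 {
		return fields, nil
	}
	return nil, errors.New("cannot parse line: no fields matched")
}
```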
func InitCluster(ctx context.Context, cli pd.Client, httpCli pdHttp.Client) error { statsResp, err := httpCli.GetRegionStatusByKeyRange(ctx, pdHttp.NewKeyRange([]byte(""), []byte("")), false) @@ -221,7 +223,7 @@ func (c *regionsStats) Do(ctx context.Context, cli pdHttp.Client) error { startID := c.regionSample*random*4 + 1 endID := c.regionSample*(random+1)*4 + 1 regionStats, err := cli.GetRegionStatusByKeyRange(ctx, - pdHttp.NewKeyRange(generateKeyForSimulator(startID, 56), generateKeyForSimulator(endID, 56)), false) + pdHttp.NewKeyRange(generateKeyForSimulator(startID), generateKeyForSimulator(endID)), false) if Debug { log.Info("do HTTP case", zap.String("case", c.name), zap.Any("region-stats", regionStats), zap.Error(err)) } @@ -246,7 +248,7 @@ func newUpdateGCSafePoint() func() GRPCCase { } } -func (c *updateGCSafePoint) Unary(ctx context.Context, cli pd.Client) error { +func (*updateGCSafePoint) Unary(ctx context.Context, cli pd.Client) error { s := time.Now().Unix() _, err := cli.UpdateGCSafePoint(ctx, uint64(s)) if err != nil { @@ -270,7 +272,7 @@ func newUpdateServiceGCSafePoint() func() GRPCCase { } } -func (c *updateServiceGCSafePoint) Unary(ctx context.Context, cli pd.Client) error { +func (*updateServiceGCSafePoint) Unary(ctx context.Context, cli pd.Client) error { s := time.Now().Unix() id := rand.Int63n(100) + 1 _, err := cli.UpdateServiceGCSafePoint(ctx, strconv.FormatInt(id, 10), id, uint64(s)) @@ -295,9 +297,9 @@ func newGetRegion() func() GRPCCase { } } -func (c *getRegion) Unary(ctx context.Context, cli pd.Client) error { +func (*getRegion) Unary(ctx context.Context, cli pd.Client) error { id := rand.Intn(totalRegion)*4 + 1 - _, err := cli.GetRegion(ctx, generateKeyForSimulator(id, 56)) + _, err := cli.GetRegion(ctx, generateKeyForSimulator(id)) if err != nil { return err } @@ -319,9 +321,9 @@ func newGetRegionEnableFollower() func() GRPCCase { } } -func (c *getRegionEnableFollower) Unary(ctx context.Context, cli pd.Client) error { +func (*getRegionEnableFollower) Unary(ctx context.Context, cli pd.Client) error { id := rand.Intn(totalRegion)*4 + 1 - _, err := cli.GetRegion(ctx, generateKeyForSimulator(id, 56), pd.WithAllowFollowerHandle()) + _, err := cli.GetRegion(ctx, generateKeyForSimulator(id), pd.WithAllowFollowerHandle()) if err != nil { return err } @@ -350,7 +352,7 @@ func (c *scanRegions) Unary(ctx context.Context, cli pd.Client) error { random := rand.Intn(upperBound) startID := c.regionSample*random*4 + 1 endID := c.regionSample*(random+1)*4 + 1 - _, err := cli.ScanRegions(ctx, generateKeyForSimulator(startID, 56), generateKeyForSimulator(endID, 56), c.regionSample) + _, err := cli.ScanRegions(ctx, generateKeyForSimulator(startID), generateKeyForSimulator(endID), c.regionSample) if err != nil { return err } @@ -372,7 +374,7 @@ func newTso() func() GRPCCase { } } -func (c *tso) Unary(ctx context.Context, cli pd.Client) error { +func (*tso) Unary(ctx context.Context, cli pd.Client) error { _, _, err := cli.GetTS(ctx) if err != nil { return err @@ -395,7 +397,7 @@ func newGetStore() func() GRPCCase { } } -func (c *getStore) Unary(ctx context.Context, cli pd.Client) error { +func (*getStore) Unary(ctx context.Context, cli pd.Client) error { storeIdx := rand.Intn(totalStore) _, err := cli.GetStore(ctx, storesID[storeIdx]) if err != nil { @@ -419,7 +421,7 @@ func newGetStores() func() GRPCCase { } } -func (c *getStores) Unary(ctx context.Context, cli pd.Client) error { +func (*getStores) Unary(ctx context.Context, cli pd.Client) error { _, err := 
cli.GetAllStores(ctx) if err != nil { return err @@ -427,9 +429,8 @@ func (c *getStores) Unary(ctx context.Context, cli pd.Client) error { return nil } -// nolint -func generateKeyForSimulator(id int, keyLen int) []byte { - k := make([]byte, keyLen) +func generateKeyForSimulator(id int) []byte { + k := make([]byte, defaultKeyLen) copy(k, fmt.Sprintf("%010d", id)) return k } @@ -449,7 +450,7 @@ func newGetKV() func() ETCDCase { } } -func (c *getKV) Init(ctx context.Context, cli *clientv3.Client) error { +func (*getKV) Init(ctx context.Context, cli *clientv3.Client) error { for i := 0; i < 100; i++ { _, err := cli.Put(ctx, fmt.Sprintf("/test/0001/%4d", i), fmt.Sprintf("%4d", i)) if err != nil { @@ -459,7 +460,7 @@ func (c *getKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } -func (c *getKV) Unary(ctx context.Context, cli *clientv3.Client) error { +func (*getKV) Unary(ctx context.Context, cli *clientv3.Client) error { _, err := cli.Get(ctx, "/test/0001", clientv3.WithPrefix()) return err } @@ -479,9 +480,9 @@ func newPutKV() func() ETCDCase { } } -func (c *putKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } +func (*putKV) Init(context.Context, *clientv3.Client) error { return nil } -func (c *putKV) Unary(ctx context.Context, cli *clientv3.Client) error { +func (*putKV) Unary(ctx context.Context, cli *clientv3.Client) error { _, err := cli.Put(ctx, "/test/0001/0000", "test") return err } @@ -501,9 +502,9 @@ func newDeleteKV() func() ETCDCase { } } -func (c *deleteKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } +func (*deleteKV) Init(context.Context, *clientv3.Client) error { return nil } -func (c *deleteKV) Unary(ctx context.Context, cli *clientv3.Client) error { +func (*deleteKV) Unary(ctx context.Context, cli *clientv3.Client) error { _, err := cli.Delete(ctx, "/test/0001/0000") return err } @@ -523,9 +524,9 @@ func newTxnKV() func() ETCDCase { } } -func (c *txnKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } +func (*txnKV) Init(context.Context, *clientv3.Client) error { return nil } -func (c *txnKV) Unary(ctx context.Context, cli *clientv3.Client) error { +func (*txnKV) Unary(ctx context.Context, cli *clientv3.Client) error { txn := cli.Txn(ctx) txn = txn.If(clientv3.Compare(clientv3.Value("/test/0001/0000"), "=", "test")) txn = txn.Then(clientv3.OpPut("/test/0001/0000", "test2")) diff --git a/tools/pd-api-bench/cases/controller.go b/tools/pd-api-bench/cases/controller.go index d6679cad1d9..42eeafe4597 100644 --- a/tools/pd-api-bench/cases/controller.go +++ b/tools/pd-api-bench/cases/controller.go @@ -64,7 +64,7 @@ func (c *Coordinator) GetHTTPCase(name string) (*Config, error) { if controller, ok := c.http[name]; ok { return controller.GetConfig(), nil } - return nil, errors.Errorf("case %v does not exist.", name) + return nil, errors.Errorf("case %v does not exist", name) } // GetGRPCCase returns the gRPC case config. @@ -74,7 +74,7 @@ func (c *Coordinator) GetGRPCCase(name string) (*Config, error) { if controller, ok := c.grpc[name]; ok { return controller.GetConfig(), nil } - return nil, errors.Errorf("case %v does not exist.", name) + return nil, errors.Errorf("case %v does not exist", name) } // GetETCDCase returns the etcd case config. 
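The `controller.go` hunks here strip the trailing period from error messages, matching revive's error-strings convention: error text should be unpunctuated (and ideally lower-case) so it composes cleanly when callers wrap it. A sketch of the resulting lookup, with `fmt.Errorf` and a stand-in `Config` substituting for pd's `pkg/errors` and case types:

```go
package demo

import "fmt"

type Config struct{ Burst int }

// getCase mirrors the Coordinator lookups: on a miss it returns an error
// with no trailing period, so a wrapped message reads naturally, e.g.
// "fetch case: case foo does not exist".
func getCase(cases map[string]*Config, name string) (*Config, error) {
	if c, ok := cases[name]; ok {
		return c, nil
	}
	return nil, fmt.Errorf("case %v does not exist", name)
}
```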
@@ -84,7 +84,7 @@ func (c *Coordinator) GetETCDCase(name string) (*Config, error) { if controller, ok := c.etcd[name]; ok { return controller.GetConfig(), nil } - return nil, errors.Errorf("case %v does not exist.", name) + return nil, errors.Errorf("case %v does not exist", name) } // GetAllHTTPCases returns the all HTTP case configs. diff --git a/tools/pd-api-bench/config/config.go b/tools/pd-api-bench/config/config.go index 675e665ab0a..d1048c0da72 100644 --- a/tools/pd-api-bench/config/config.go +++ b/tools/pd-api-bench/config/config.go @@ -15,7 +15,6 @@ package config import ( - "github.com/BurntSushi/toml" "github.com/pingcap/log" "github.com/pkg/errors" flag "github.com/spf13/pflag" @@ -73,14 +72,13 @@ func (c *Config) Parse(arguments []string) error { } // Load config file if specified. - var meta *toml.MetaData if c.configFile != "" { - meta, err = configutil.ConfigFromFile(c, c.configFile) + _, err = configutil.ConfigFromFile(c, c.configFile) if err != nil { return err } } - c.Adjust(meta) + c.Adjust() // Parse again to replace with command line options. err = c.flagSet.Parse(arguments) @@ -118,7 +116,7 @@ func (c *Config) InitCoordinator(co *cases.Coordinator) { } // Adjust is used to adjust configurations -func (c *Config) Adjust(meta *toml.MetaData) { +func (c *Config) Adjust() { if len(c.Log.Format) == 0 { c.Log.Format = "text" } diff --git a/tools/pd-api-bench/main.go b/tools/pd-api-bench/main.go index dff40555fd6..f9feeeea580 100644 --- a/tools/pd-api-bench/main.go +++ b/tools/pd-api-bench/main.go @@ -341,7 +341,6 @@ func runHTTPServer(cfg *config.Config, co *cases.Coordinator) { } c.IndentedJSON(http.StatusOK, cfg) }) - // nolint engine.Run(cfg.StatusAddr) } diff --git a/tools/pd-backup/pdbackup/backup_test.go b/tools/pd-backup/pdbackup/backup_test.go index b35bf1e8a70..0ab9116ddbe 100644 --- a/tools/pd-backup/pdbackup/backup_test.go +++ b/tools/pd-backup/pdbackup/backup_test.go @@ -83,7 +83,7 @@ func setupServer() (*httptest.Server, *config.Config) { }, } - server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, _ *http.Request) { b, err := json.Marshal(serverConfig) if err != nil { res.WriteHeader(http.StatusInternalServerError) @@ -98,7 +98,7 @@ func setupServer() (*httptest.Server, *config.Config) { return server, serverConfig } -func (s *backupTestSuite) BeforeTest(suiteName, testName string) { +func (s *backupTestSuite) BeforeTest(string, string) { re := s.Require() ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) defer cancel() @@ -124,7 +124,7 @@ func (s *backupTestSuite) BeforeTest(suiteName, testName string) { re.NoError(err) } -func (s *backupTestSuite) AfterTest(suiteName, testName string) { +func (s *backupTestSuite) AfterTest(string, string) { s.etcd.Close() } diff --git a/tools/pd-ctl/pdctl/command/config_command.go b/tools/pd-ctl/pdctl/command/config_command.go index c70c33e26c3..0c3851350cc 100644 --- a/tools/pd-ctl/pdctl/command/config_command.go +++ b/tools/pd-ctl/pdctl/command/config_command.go @@ -212,7 +212,7 @@ func NewDeleteLabelPropertyConfigCommand() *cobra.Command { return sc } -func showConfigCommandFunc(cmd *cobra.Command, args []string) { +func showConfigCommandFunc(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) allR, err := doRequest(cmd, configPrefix, http.MethodGet, header) if err != nil { @@ -268,7 +268,7 @@ var hideConfig = []string{ "scheduler-max-waiting-operator", } -func 
showScheduleConfigCommandFunc(cmd *cobra.Command, args []string) { +func showScheduleConfigCommandFunc(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) r, err := doRequest(cmd, schedulePrefix, http.MethodGet, header) if err != nil { @@ -278,7 +278,7 @@ func showScheduleConfigCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showReplicationConfigCommandFunc(cmd *cobra.Command, args []string) { +func showReplicationConfigCommandFunc(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) r, err := doRequest(cmd, replicatePrefix, http.MethodGet, header) if err != nil { @@ -288,7 +288,7 @@ func showReplicationConfigCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showLabelPropertyConfigCommandFunc(cmd *cobra.Command, args []string) { +func showLabelPropertyConfigCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, labelPropertyPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get config: %s\n", err) @@ -297,7 +297,7 @@ func showLabelPropertyConfigCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showAllConfigCommandFunc(cmd *cobra.Command, args []string) { +func showAllConfigCommandFunc(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) r, err := doRequest(cmd, configPrefix, http.MethodGet, header) if err != nil { @@ -307,7 +307,7 @@ func showAllConfigCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showClusterVersionCommandFunc(cmd *cobra.Command, args []string) { +func showClusterVersionCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, clusterVersionPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get cluster version: %s\n", err) @@ -316,7 +316,7 @@ func showClusterVersionCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showReplicationModeCommandFunc(cmd *cobra.Command, args []string) { +func showReplicationModeCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, replicationModePrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get replication mode config: %s\n", err) @@ -325,7 +325,7 @@ func showReplicationModeCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showServerCommandFunc(cmd *cobra.Command, args []string) { +func showServerCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, pdServerPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get server config: %s\n", err) @@ -529,7 +529,7 @@ func NewPlacementRulesCommand() *cobra.Command { return c } -func enablePlacementRulesFunc(cmd *cobra.Command, args []string) { +func enablePlacementRulesFunc(cmd *cobra.Command, _ []string) { err := postConfigDataWithPath(cmd, "enable-placement-rules", "true", configPrefix) if err != nil { cmd.Printf("Failed to set config: %s\n", err) @@ -538,7 +538,7 @@ func enablePlacementRulesFunc(cmd *cobra.Command, args []string) { cmd.Println("Success!") } -func disablePlacementRulesFunc(cmd *cobra.Command, args []string) { +func disablePlacementRulesFunc(cmd *cobra.Command, _ []string) { err := postConfigDataWithPath(cmd, "enable-placement-rules", "false", configPrefix) if err != nil { cmd.Printf("Failed to set config: %s\n", err) @@ -547,7 +547,7 @@ func disablePlacementRulesFunc(cmd *cobra.Command, args []string) { cmd.Println("Success!") } -func getPlacementRulesFunc(cmd *cobra.Command, args []string) { +func getPlacementRulesFunc(cmd *cobra.Command, _ []string) { 
getFlag := func(key string) string { if f := cmd.Flag(key); f != nil { return f.Value.String() @@ -598,7 +598,7 @@ func getPlacementRulesFunc(cmd *cobra.Command, args []string) { cmd.Println("rules saved to file " + file) } -func putPlacementRulesFunc(cmd *cobra.Command, args []string) { +func putPlacementRulesFunc(cmd *cobra.Command, _ []string) { var file string if f := cmd.Flag("in"); f != nil { file = f.Value.String() @@ -712,7 +712,7 @@ func getRuleBundle(cmd *cobra.Command, args []string) { cmd.Printf("rule group saved to file %s\n", file) } -func setRuleBundle(cmd *cobra.Command, args []string) { +func setRuleBundle(cmd *cobra.Command, _ []string) { var file string if f := cmd.Flag("in"); f != nil { file = f.Value.String() @@ -763,7 +763,7 @@ func delRuleBundle(cmd *cobra.Command, args []string) { cmd.Println(res) } -func loadRuleBundle(cmd *cobra.Command, args []string) { +func loadRuleBundle(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) res, err := doRequest(cmd, ruleBundlePrefix, http.MethodGet, header) if err != nil { @@ -788,7 +788,7 @@ func loadRuleBundle(cmd *cobra.Command, args []string) { cmd.Printf("rule group saved to file %s\n", file) } -func saveRuleBundle(cmd *cobra.Command, args []string) { +func saveRuleBundle(cmd *cobra.Command, _ []string) { var file string if f := cmd.Flag("in"); f != nil { file = f.Value.String() diff --git a/tools/pd-ctl/pdctl/command/exit_command.go b/tools/pd-ctl/pdctl/command/exit_command.go index a3d38be97bd..3ead7e54e8e 100644 --- a/tools/pd-ctl/pdctl/command/exit_command.go +++ b/tools/pd-ctl/pdctl/command/exit_command.go @@ -30,6 +30,6 @@ func NewExitCommand() *cobra.Command { return conf } -func exitCommandFunc(cmd *cobra.Command, args []string) { +func exitCommandFunc(*cobra.Command, []string) { os.Exit(0) } diff --git a/tools/pd-ctl/pdctl/command/gc_safepoint_command.go b/tools/pd-ctl/pdctl/command/gc_safepoint_command.go index 80c6328e955..f4a6b6fcfd0 100644 --- a/tools/pd-ctl/pdctl/command/gc_safepoint_command.go +++ b/tools/pd-ctl/pdctl/command/gc_safepoint_command.go @@ -49,7 +49,7 @@ func NewDeleteServiceGCSafepointCommand() *cobra.Command { return l } -func showSSPs(cmd *cobra.Command, args []string) { +func showSSPs(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, serviceGCSafepointPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get service GC safepoint: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/global.go b/tools/pd-ctl/pdctl/command/global.go index 4f20b0b35b4..fa77df6a101 100644 --- a/tools/pd-ctl/pdctl/command/global.go +++ b/tools/pd-ctl/pdctl/command/global.go @@ -126,7 +126,7 @@ var dialClient = &http.Client{ } // RequireHTTPSClient creates a HTTPS client if the related flags are set -func RequireHTTPSClient(cmd *cobra.Command, args []string) error { +func RequireHTTPSClient(cmd *cobra.Command, _ []string) error { caPath, err := cmd.Flags().GetString("cacert") if err == nil && len(caPath) != 0 { certPath, err := cmd.Flags().GetString("cert") diff --git a/tools/pd-ctl/pdctl/command/health_command.go b/tools/pd-ctl/pdctl/command/health_command.go index 1bae871285d..50ac7763d28 100644 --- a/tools/pd-ctl/pdctl/command/health_command.go +++ b/tools/pd-ctl/pdctl/command/health_command.go @@ -34,7 +34,7 @@ func NewHealthCommand() *cobra.Command { return m } -func showHealthCommandFunc(cmd *cobra.Command, args []string) { +func showHealthCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, healthPrefix, http.MethodGet, http.Header{}) if err != nil { 
cmd.Println(err) diff --git a/tools/pd-ctl/pdctl/command/hot_command.go b/tools/pd-ctl/pdctl/command/hot_command.go index f6be9c7176b..77c0ee4d7de 100644 --- a/tools/pd-ctl/pdctl/command/hot_command.go +++ b/tools/pd-ctl/pdctl/command/hot_command.go @@ -107,7 +107,7 @@ func NewHotStoreCommand() *cobra.Command { return cmd } -func showHotStoresCommandFunc(cmd *cobra.Command, args []string) { +func showHotStoresCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, hotStoresPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get store hotspot: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/label_command.go b/tools/pd-ctl/pdctl/command/label_command.go index c0ae3135210..6d95465392f 100644 --- a/tools/pd-ctl/pdctl/command/label_command.go +++ b/tools/pd-ctl/pdctl/command/label_command.go @@ -53,7 +53,7 @@ func NewLabelListStoresCommand() *cobra.Command { return l } -func showLabelsCommandFunc(cmd *cobra.Command, args []string) { +func showLabelsCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, labelsPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get labels: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/member_command.go b/tools/pd-ctl/pdctl/command/member_command.go index c16a879429c..b939935cfb9 100644 --- a/tools/pd-ctl/pdctl/command/member_command.go +++ b/tools/pd-ctl/pdctl/command/member_command.go @@ -89,7 +89,7 @@ func NewLeaderMemberCommand() *cobra.Command { return d } -func showMemberCommandFunc(cmd *cobra.Command, args []string) { +func showMemberCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, membersPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get pd members: %s\n", err) @@ -126,7 +126,7 @@ func deleteMemberByIDCommandFunc(cmd *cobra.Command, args []string) { cmd.Println("Success!") } -func getLeaderMemberCommandFunc(cmd *cobra.Command, args []string) { +func getLeaderMemberCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, leaderMemberPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get the leader of pd members: %s\n", err) @@ -135,7 +135,7 @@ func getLeaderMemberCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func resignLeaderCommandFunc(cmd *cobra.Command, args []string) { +func resignLeaderCommandFunc(cmd *cobra.Command, _ []string) { prefix := leaderMemberPrefix + "/resign" _, err := doRequest(cmd, prefix, http.MethodPost, http.Header{}) if err != nil { diff --git a/tools/pd-ctl/pdctl/command/min_resolved_ts.go b/tools/pd-ctl/pdctl/command/min_resolved_ts.go index dbf0c47b2de..904f880d82d 100644 --- a/tools/pd-ctl/pdctl/command/min_resolved_ts.go +++ b/tools/pd-ctl/pdctl/command/min_resolved_ts.go @@ -35,7 +35,7 @@ func NewMinResolvedTSCommand() *cobra.Command { } // ShowMinResolvedTS show min resolved ts -func ShowMinResolvedTS(cmd *cobra.Command, args []string) { +func ShowMinResolvedTS(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, minResolvedTSPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get min resolved ts: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/operator.go b/tools/pd-ctl/pdctl/command/operator.go index c57e07db75a..4e7771580de 100644 --- a/tools/pd-ctl/pdctl/command/operator.go +++ b/tools/pd-ctl/pdctl/command/operator.go @@ -375,7 +375,6 @@ func splitRegionCommandFunc(cmd *cobra.Command, args []string) { policy := cmd.Flags().Lookup("policy").Value.String() switch policy { case "scan", 
"approximate", "usekey": - break default: cmd.Println("Error: unknown policy") return diff --git a/tools/pd-ctl/pdctl/command/ping_command.go b/tools/pd-ctl/pdctl/command/ping_command.go index 6622b079d47..7efa46180d1 100644 --- a/tools/pd-ctl/pdctl/command/ping_command.go +++ b/tools/pd-ctl/pdctl/command/ping_command.go @@ -33,7 +33,7 @@ func NewPingCommand() *cobra.Command { return m } -func showPingCommandFunc(cmd *cobra.Command, args []string) { +func showPingCommandFunc(cmd *cobra.Command, _ []string) { start := time.Now() _, err := doRequest(cmd, pingPrefix, http.MethodGet, http.Header{}) if err != nil { diff --git a/tools/pd-ctl/pdctl/command/region_command.go b/tools/pd-ctl/pdctl/command/region_command.go index 33191bbe12b..e03de1c62ac 100644 --- a/tools/pd-ctl/pdctl/command/region_command.go +++ b/tools/pd-ctl/pdctl/command/region_command.go @@ -156,7 +156,7 @@ func showRegionCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func scanRegionCommandFunc(cmd *cobra.Command, args []string) { +func scanRegionCommandFunc(cmd *cobra.Command, _ []string) { const limit = 1024 var key []byte for { @@ -533,7 +533,7 @@ func NewRangesWithRangeHolesCommand() *cobra.Command { return r } -func showRangesWithRangeHolesCommandFunc(cmd *cobra.Command, args []string) { +func showRangesWithRangeHolesCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, regionsRangeHolesPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get range holes: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/scheduler.go b/tools/pd-ctl/pdctl/command/scheduler.go index 3799c4a820e..d5deba670ad 100644 --- a/tools/pd-ctl/pdctl/command/scheduler.go +++ b/tools/pd-ctl/pdctl/command/scheduler.go @@ -391,7 +391,7 @@ func NewSlowTrendEvictLeaderSchedulerCommand() *cobra.Command { return c } -func addSchedulerForSplitBucketCommandFunc(cmd *cobra.Command, args []string) { +func addSchedulerForSplitBucketCommandFunc(cmd *cobra.Command, _ []string) { input := make(map[string]any) input["name"] = cmd.Name() postJSON(cmd, schedulersPrefix, input) diff --git a/tools/pd-ctl/pdctl/command/store_command.go b/tools/pd-ctl/pdctl/command/store_command.go index 085483cc5df..bc024d5a2e6 100644 --- a/tools/pd-ctl/pdctl/command/store_command.go +++ b/tools/pd-ctl/pdctl/command/store_command.go @@ -675,7 +675,7 @@ func storeCheckCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showStoresCommandFunc(cmd *cobra.Command, args []string) { +func showStoresCommandFunc(cmd *cobra.Command, _ []string) { prefix := storesPrefix r, err := doRequest(cmd, prefix, http.MethodGet, http.Header{}) if err != nil { @@ -706,7 +706,7 @@ func showAllStoresLimitCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func removeTombStoneCommandFunc(cmd *cobra.Command, args []string) { +func removeTombStoneCommandFunc(cmd *cobra.Command, _ []string) { prefix := path.Join(storesPrefix, "remove-tombstone") _, err := doRequest(cmd, prefix, http.MethodDelete, http.Header{}) if err != nil { diff --git a/tools/pd-ctl/pdctl/command/unsafe_command.go b/tools/pd-ctl/pdctl/command/unsafe_command.go index 66ef8e6c934..04d272385e7 100644 --- a/tools/pd-ctl/pdctl/command/unsafe_command.go +++ b/tools/pd-ctl/pdctl/command/unsafe_command.go @@ -106,7 +106,7 @@ func removeFailedStoresCommandFunc(cmd *cobra.Command, args []string) { postJSON(cmd, prefix, postInput) } -func removeFailedStoresShowCommandFunc(cmd *cobra.Command, args []string) { +func removeFailedStoresShowCommandFunc(cmd 
*cobra.Command, _ []string) { var resp string var err error prefix := fmt.Sprintf("%s/remove-failed-stores/show", unsafePrefix) diff --git a/tools/pd-ctl/pdctl/ctl.go b/tools/pd-ctl/pdctl/ctl.go index 5790911d79f..f8eaff5e76e 100644 --- a/tools/pd-ctl/pdctl/ctl.go +++ b/tools/pd-ctl/pdctl/ctl.go @@ -86,7 +86,7 @@ func MainStart(args []string) { // TODO: deprecated rootCmd.Flags().BoolP("detach", "d", true, "Run pdctl without readline.") - rootCmd.Run = func(cmd *cobra.Command, args []string) { + rootCmd.Run = func(cmd *cobra.Command, _ []string) { if v, err := cmd.Flags().GetBool("version"); err == nil && v { versioninfo.Print() return diff --git a/tools/pd-ctl/tests/config/config_test.go b/tools/pd-ctl/tests/config/config_test.go index 6776c9851b3..07a7c2aa990 100644 --- a/tools/pd-ctl/tests/config/config_test.go +++ b/tools/pd-ctl/tests/config/config_test.go @@ -568,7 +568,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) re.Contains(string(output), "Success!") // test show - suite.checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) + checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) f, _ := os.CreateTemp("/tmp", "pd_tests") fname := f.Name() @@ -576,7 +576,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) defer os.RemoveAll(fname) // test load - rules := suite.checkLoadRule(re, pdAddr, fname, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) + rules := checkLoadRule(re, pdAddr, fname, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) // test save rules = append(rules, placement.Rule{ @@ -596,11 +596,11 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) re.NoError(err) // test show group - suite.checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}, {placement.DefaultGroupID, "test1"}}, "--group=pd") + checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}, {placement.DefaultGroupID, "test1"}}, "--group=pd") // test rule region detail pdTests.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b")) - suite.checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}, "--region=1", "--detail") + checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}, "--region=1", "--detail") // test delete // need to clear up the args, so create a new cobra.Command. Otherwise the group still exists.
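The hunks above and below all apply one mechanical pattern across the test suites and pd-ctl commands: helpers that never read their `suite` receiver become package-level functions, and parameters that are never read are renamed to `_`, which is what revive's `unused-receiver` and `unused-parameter` rules (enabled in `.golangci.yml` earlier in this diff) require. A minimal sketch of the pattern, using hypothetical names (`checker`, `show`) rather than code from this repo:

```go
package main

import "fmt"

type checker struct{}

// Before (flagged by revive): receiver `c` and parameter `args` are
// declared but never read.
//
//	func (c *checker) show(name string, args []string) { fmt.Println(name) }

// After: the helper becomes a plain function and the unused parameter
// is blanked out with `_`. (Keeping the method but writing
// `func (*checker) show(...)` would also silence unused-receiver.)
func show(name string, _ []string) {
	fmt.Println(name)
}

func main() {
	show("pd", nil)
}
```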
@@ -609,7 +609,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) os.WriteFile(fname, b, 0600) _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "save", "--in="+fname) re.NoError(err) - suite.checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, "test1"}}, "--group=pd") + checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, "test1"}}, "--group=pd") } func (suite *configTestSuite) TestPlacementRuleGroups() { @@ -724,7 +724,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus defer os.RemoveAll(fname) // test load - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: placement.DefaultGroupID, Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -736,7 +736,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus re.NoError(os.WriteFile(fname, b, 0600)) _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname) re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: placement.DefaultGroupID, Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -745,7 +745,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", placement.DefaultGroupID) re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -757,7 +757,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus re.NoError(os.WriteFile(fname, b, 0600)) _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname) re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -768,7 +768,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus bundles := []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, } - suite.checkLoadRuleBundle(re, pdAddr, fname, bundles) + checkLoadRuleBundle(re, pdAddr, fname, bundles) // test save bundle.Rules = []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}} @@ -778,7 +778,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster 
*pdTests.TestClus re.NoError(os.WriteFile(fname, b, 0600)) _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname) re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -791,7 +791,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname, "--partial") re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -810,12 +810,12 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", "--regexp", ".*f") re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) } -func (suite *configTestSuite) checkLoadRuleBundle(re *require.Assertions, pdAddr string, fname string, expectValues []placement.GroupBundle) { +func checkLoadRuleBundle(re *require.Assertions, pdAddr string, fname string, expectValues []placement.GroupBundle) { var bundles []placement.GroupBundle cmd := ctl.GetRootCmd() testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server @@ -828,7 +828,7 @@ func (suite *configTestSuite) checkLoadRuleBundle(re *require.Assertions, pdAddr assertBundles(re, bundles, expectValues) } -func (suite *configTestSuite) checkLoadRule(re *require.Assertions, pdAddr string, fname string, expectValues [][2]string) []placement.Rule { +func checkLoadRule(re *require.Assertions, pdAddr string, fname string, expectValues [][2]string) []placement.Rule { var rules []placement.Rule cmd := ctl.GetRootCmd() testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server @@ -844,7 +844,7 @@ func (suite *configTestSuite) checkLoadRule(re *require.Assertions, pdAddr strin return rules } -func (suite *configTestSuite) checkShowRuleKey(re *require.Assertions, pdAddr string, expectValues [][2]string, opts ...string) { +func checkShowRuleKey(re *require.Assertions, pdAddr string, expectValues [][2]string, opts ...string) { var ( rules []placement.Rule fit placement.RegionFit diff --git a/tools/pd-ctl/tests/global_test.go b/tools/pd-ctl/tests/global_test.go index 14b7aafdccd..f4f55e2af89 100644 --- a/tools/pd-ctl/tests/global_test.go +++ b/tools/pd-ctl/tests/global_test.go @@ -34,7 +34,7 @@ const pdControlCallerID = "pd-ctl" func TestSendAndGetComponent(t *testing.T) { re := require.New(t) - handler := func(ctx context.Context, s *server.Server) (http.Handler, apiutil.APIServiceGroup, error) { + handler := func(context.Context, *server.Server) (http.Handler, 
apiutil.APIServiceGroup, error) { mux := http.NewServeMux() mux.HandleFunc("/pd/api/v1/health", func(w http.ResponseWriter, r *http.Request) { callerID := apiutil.GetCallerIDOnHTTP(r) diff --git a/tools/pd-ctl/tests/hot/hot_test.go b/tools/pd-ctl/tests/hot/hot_test.go index 9d8dbbd123a..7661704aa41 100644 --- a/tools/pd-ctl/tests/hot/hot_test.go +++ b/tools/pd-ctl/tests/hot/hot_test.go @@ -51,7 +51,7 @@ func TestHotTestSuite(t *testing.T) { func (suite *hotTestSuite) SetupSuite() { suite.env = pdTests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { conf.Schedule.MaxStoreDownTime.Duration = time.Hour conf.Schedule.HotRegionCacheHitsThreshold = 0 }, @@ -398,7 +398,7 @@ func TestHistoryHotRegions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := pdTests.NewTestCluster(ctx, 1, - func(cfg *config.Config, serverName string) { + func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 cfg.Schedule.HotRegionsWriteInterval.Duration = 1000 * time.Millisecond cfg.Schedule.HotRegionsReservedDays = 1 @@ -520,7 +520,7 @@ func TestBuckets(t *testing.T) { statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := pdTests.NewTestCluster(ctx, 1, func(cfg *config.Config, serverName string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 }) + cluster, err := pdTests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 }) re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() diff --git a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go index 5d85f35dacf..87fd17a97d4 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go +++ b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go @@ -100,7 +100,7 @@ func TestSplitKeyspaceGroup(t *testing.T) { for i := 0; i < 129; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -155,7 +155,7 @@ func TestExternalAllocNodeWhenStart(t *testing.T) { for i := 0; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -195,7 +195,7 @@ func TestSetNodeAndPriorityKeyspaceGroup(t *testing.T) { for i := 0; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -299,7 +299,7 @@ func TestMergeKeyspaceGroup(t *testing.T) { for i := 0; i < 129; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -418,7 +418,7 @@ func TestKeyspaceGroupState(t 
*testing.T) { for i := 0; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -509,7 +509,7 @@ func TestShowKeyspaceGroupPrimary(t *testing.T) { for i := 0; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) diff --git a/tools/pd-ctl/tests/keyspace/keyspace_test.go b/tools/pd-ctl/tests/keyspace/keyspace_test.go index 4c1fb2aadd5..54c25fc2099 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_test.go +++ b/tools/pd-ctl/tests/keyspace/keyspace_test.go @@ -47,7 +47,7 @@ func TestKeyspace(t *testing.T) { for i := 1; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) diff --git a/tools/pd-ctl/tests/label/label_test.go b/tools/pd-ctl/tests/label/label_test.go index 9ba6f267ae1..f7370a71872 100644 --- a/tools/pd-ctl/tests/label/label_test.go +++ b/tools/pd-ctl/tests/label/label_test.go @@ -34,7 +34,7 @@ func TestLabel(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := pdTests.NewTestCluster(ctx, 1, func(cfg *config.Config, serverName string) { cfg.Replication.StrictlyMatchLabel = false }) + cluster, err := pdTests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Replication.StrictlyMatchLabel = false }) re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() diff --git a/tools/pd-ctl/tests/operator/operator_test.go b/tools/pd-ctl/tests/operator/operator_test.go index 5af73184076..7e5d390c4ce 100644 --- a/tools/pd-ctl/tests/operator/operator_test.go +++ b/tools/pd-ctl/tests/operator/operator_test.go @@ -43,7 +43,7 @@ func TestOperatorTestSuite(t *testing.T) { func (suite *operatorTestSuite) SetupSuite() { suite.env = pdTests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { // TODO: enable placement rules conf.Replication.MaxReplicas = 2 conf.Replication.EnablePlacementRules = false diff --git a/tools/pd-ctl/tests/resourcemanager/resource_manager_command_test.go b/tools/pd-ctl/tests/resourcemanager/resource_manager_command_test.go index 5cfc16ffb02..d387a2b87ae 100644 --- a/tools/pd-ctl/tests/resourcemanager/resource_manager_command_test.go +++ b/tools/pd-ctl/tests/resourcemanager/resource_manager_command_test.go @@ -58,7 +58,7 @@ func (s *testResourceManagerSuite) TearDownSuite() { func (s *testResourceManagerSuite) TestConfigController() { re := s.Require() - expectCfg := server.ControllerConfig{} + expectCfg := server.Config{} expectCfg.Adjust(nil) // Show controller config checkShow := func() { @@ -69,7 +69,7 @@ func (s *testResourceManagerSuite) TestConfigController() { actualCfg := server.ControllerConfig{} err = json.Unmarshal(output, &actualCfg) re.NoError(err, string(output)) - re.Equal(expectCfg, actualCfg) + re.Equal(expectCfg.Controller, 
actualCfg) } // Check default config @@ -80,20 +80,20 @@ func (s *testResourceManagerSuite) TestConfigController() { output, err := tests.ExecuteCommand(ctl.GetRootCmd(), args...) re.NoError(err) re.Contains(string(output), "Success!") - expectCfg.LTBMaxWaitDuration = typeutil.Duration{Duration: 1 * time.Hour} + expectCfg.Controller.LTBMaxWaitDuration = typeutil.Duration{Duration: 1 * time.Hour} checkShow() args = []string{"-u", s.pdAddr, "resource-manager", "config", "controller", "set", "enable-controller-trace-log", "true"} output, err = tests.ExecuteCommand(ctl.GetRootCmd(), args...) re.NoError(err) re.Contains(string(output), "Success!") - expectCfg.EnableControllerTraceLog = true + expectCfg.Controller.EnableControllerTraceLog = true checkShow() args = []string{"-u", s.pdAddr, "resource-manager", "config", "controller", "set", "write-base-cost", "2"} output, err = tests.ExecuteCommand(ctl.GetRootCmd(), args...) re.NoError(err) re.Contains(string(output), "Success!") - expectCfg.RequestUnit.WriteBaseCost = 2 + expectCfg.Controller.RequestUnit.WriteBaseCost = 2 checkShow() } diff --git a/tools/pd-ctl/tests/store/store_test.go b/tools/pd-ctl/tests/store/store_test.go index b8cd5c13a79..afb97401168 100644 --- a/tools/pd-ctl/tests/store/store_test.go +++ b/tools/pd-ctl/tests/store/store_test.go @@ -581,7 +581,7 @@ func TestStoreTLS(t *testing.T) { CertFile: filepath.Join(certPath, "pd-server.pem"), TrustedCAFile: filepath.Join(certPath, "ca.pem"), } - cluster, err := pdTests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + cluster, err := pdTests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Security.TLSConfig = grpcutil.TLSConfig{ KeyPath: tlsInfo.KeyFile, CertPath: tlsInfo.CertFile, diff --git a/tools/pd-heartbeat-bench/main.go b/tools/pd-heartbeat-bench/main.go index 44d1b001269..ec5e2506e6b 100644 --- a/tools/pd-heartbeat-bench/main.go +++ b/tools/pd-heartbeat-bench/main.go @@ -192,7 +192,7 @@ type Regions struct { updateFlow []int } -func (rs *Regions) init(cfg *config.Config, options *config.Options) { +func (rs *Regions) init(cfg *config.Config) { rs.regions = make([]*pdpb.RegionHeartbeatRequest, 0, cfg.RegionCount) rs.updateRound = 0 @@ -507,7 +507,7 @@ func main() { initClusterID(ctx, cli) go runHTTPServer(cfg, options) regions := new(Regions) - regions.init(cfg, options) + regions.init(cfg) log.Info("finish init regions") stores := newStores(cfg.StoreCount) stores.update(regions) diff --git a/tools/pd-simulator/main.go b/tools/pd-simulator/main.go index 5d781757b39..73f4a0bba12 100644 --- a/tools/pd-simulator/main.go +++ b/tools/pd-simulator/main.go @@ -128,8 +128,11 @@ func runHTTPServer() { http.Handle("/pprof/allocs", pprof.Handler("allocs")) http.Handle("/pprof/block", pprof.Handler("block")) http.Handle("/pprof/goroutine", pprof.Handler("goroutine")) - // nolint - http.ListenAndServe(*statusAddress, nil) + server := &http.Server{ + Addr: *statusAddress, + ReadHeaderTimeout: 3 * time.Second, + } + server.ListenAndServe() } // NewSingleServer creates a pd server for simulator. 
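The `runHTTPServer` hunk above is the usual fix for the linter complaint (previously suppressed with `// nolint`) about a bare `http.ListenAndServe`: a raw server has no `ReadHeaderTimeout`, so a slow client can hold a connection open indefinitely while sending headers. A self-contained sketch of the same pattern, with a placeholder address and handler (not taken from the simulator) and the returned error logged rather than discarded:

```go
package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// ReadHeaderTimeout bounds how long a client may take to send its
	// request headers, which is exactly the condition linters flag on
	// the bare http.ListenAndServe helper.
	server := &http.Server{
		Addr:              ":8080", // placeholder, not the simulator's status address
		Handler:           mux,
		ReadHeaderTimeout: 3 * time.Second,
	}
	// Unlike the diff, which drops the error, log it on exit.
	log.Fatal(server.ListenAndServe())
}
```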
diff --git a/tools/pd-simulator/simulator/cases/add_nodes.go b/tools/pd-simulator/simulator/cases/add_nodes.go index 833ead89f53..241b34a9473 100644 --- a/tools/pd-simulator/simulator/cases/add_nodes.go +++ b/tools/pd-simulator/simulator/cases/add_nodes.go @@ -55,7 +55,7 @@ func newAddNodes() *Case { } threshold := 0.05 - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := true leaderCounts := make([]int, 0, storeNum) regionCounts := make([]int, 0, storeNum) diff --git a/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go b/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go index 410d5e984c7..59b0b54e1ca 100644 --- a/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go +++ b/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go @@ -73,7 +73,7 @@ func newAddNodesDynamic() *Case { simCase.Events = []EventDescriptor{e} threshold := 0.05 - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := numNodes == storeNum leaderCounts := make([]int, 0, numNodes) regionCounts := make([]int, 0, numNodes) diff --git a/tools/pd-simulator/simulator/cases/balance_leader.go b/tools/pd-simulator/simulator/cases/balance_leader.go index 8f2b87e3180..bbc7ce97f68 100644 --- a/tools/pd-simulator/simulator/cases/balance_leader.go +++ b/tools/pd-simulator/simulator/cases/balance_leader.go @@ -51,7 +51,7 @@ func newBalanceLeader() *Case { } threshold := 0.05 - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := true leaderCounts := make([]int, 0, storeNum) for i := 1; i <= storeNum; i++ { diff --git a/tools/pd-simulator/simulator/cases/balance_region.go b/tools/pd-simulator/simulator/cases/balance_region.go index 0a013cf3876..3b0c46f1670 100644 --- a/tools/pd-simulator/simulator/cases/balance_region.go +++ b/tools/pd-simulator/simulator/cases/balance_region.go @@ -59,7 +59,7 @@ func newRedundantBalanceRegion() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) diff --git a/tools/pd-simulator/simulator/cases/delete_nodes.go b/tools/pd-simulator/simulator/cases/delete_nodes.go index 33f7ada14a0..4ba8e5064a4 100644 --- a/tools/pd-simulator/simulator/cases/delete_nodes.go +++ b/tools/pd-simulator/simulator/cases/delete_nodes.go @@ -72,7 +72,7 @@ func newDeleteNodes() *Case { simCase.Events = []EventDescriptor{e} threshold := 0.05 - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := numNodes == noEmptyStoreNum leaderCounts := make([]int, 0, numNodes) regionCounts := make([]int, 0, numNodes) diff --git a/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go b/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go index bd056bdf9c1..7fa50e56197 100644 --- a/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go +++ b/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go @@ -62,7 +62,7 @@ func newLabelNotMatch1() *Case 
{ storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) @@ -128,7 +128,7 @@ func newLabelIsolation1() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) @@ -189,7 +189,7 @@ func newLabelIsolation2() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) diff --git a/tools/pd-simulator/simulator/cases/diagnose_rule.go b/tools/pd-simulator/simulator/cases/diagnose_rule.go index 6cd76c854b7..15c5942d810 100644 --- a/tools/pd-simulator/simulator/cases/diagnose_rule.go +++ b/tools/pd-simulator/simulator/cases/diagnose_rule.go @@ -100,7 +100,7 @@ func newRule1() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) @@ -179,7 +179,7 @@ func newRule2() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) diff --git a/tools/pd-simulator/simulator/cases/event_inner.go b/tools/pd-simulator/simulator/cases/event_inner.go index 3edf26b72a5..72521584e88 100644 --- a/tools/pd-simulator/simulator/cases/event_inner.go +++ b/tools/pd-simulator/simulator/cases/event_inner.go @@ -25,7 +25,7 @@ type WriteFlowOnSpotDescriptor struct { } // Type implements the EventDescriptor interface. -func (w *WriteFlowOnSpotDescriptor) Type() string { +func (*WriteFlowOnSpotDescriptor) Type() string { return "write-flow-on-spot" } @@ -35,7 +35,7 @@ type WriteFlowOnRegionDescriptor struct { } // Type implements the EventDescriptor interface. -func (w *WriteFlowOnRegionDescriptor) Type() string { +func (*WriteFlowOnRegionDescriptor) Type() string { return "write-flow-on-region" } @@ -45,7 +45,7 @@ type ReadFlowOnRegionDescriptor struct { } // Type implements the EventDescriptor interface. -func (w *ReadFlowOnRegionDescriptor) Type() string { +func (*ReadFlowOnRegionDescriptor) Type() string { return "read-flow-on-region" } @@ -55,7 +55,7 @@ type AddNodesDescriptor struct { } // Type implements the EventDescriptor interface. -func (w *AddNodesDescriptor) Type() string { +func (*AddNodesDescriptor) Type() string { return "add-nodes" } @@ -65,6 +65,6 @@ type DeleteNodesDescriptor struct { } // Type implements the EventDescriptor interface. 
-func (w *DeleteNodesDescriptor) Type() string { +func (*DeleteNodesDescriptor) Type() string { return "delete-nodes" } diff --git a/tools/pd-simulator/simulator/cases/hot_read.go b/tools/pd-simulator/simulator/cases/hot_read.go index 9df4f8796e8..d4ec6831d95 100644 --- a/tools/pd-simulator/simulator/cases/hot_read.go +++ b/tools/pd-simulator/simulator/cases/hot_read.go @@ -67,12 +67,12 @@ func newHotRead() *Case { } } e := &ReadFlowOnRegionDescriptor{} - e.Step = func(tick int64) map[uint64]int64 { + e.Step = func(int64) map[uint64]int64 { return readFlow } simCase.Events = []EventDescriptor{e} // Checker description - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { leaderCount := make([]int, storeNum) for id := range readFlow { leaderStore := regions.GetRegion(id).GetLeader().GetStoreId() diff --git a/tools/pd-simulator/simulator/cases/hot_write.go b/tools/pd-simulator/simulator/cases/hot_write.go index 8efe32c5657..8428afa75b5 100644 --- a/tools/pd-simulator/simulator/cases/hot_write.go +++ b/tools/pd-simulator/simulator/cases/hot_write.go @@ -66,14 +66,14 @@ func newHotWrite() *Case { } } e := &WriteFlowOnRegionDescriptor{} - e.Step = func(tick int64) map[uint64]int64 { + e.Step = func(int64) map[uint64]int64 { return writeFlow } simCase.Events = []EventDescriptor{e} // Checker description - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { leaderCount := make([]int, storeNum) peerCount := make([]int, storeNum) for id := range writeFlow { diff --git a/tools/pd-simulator/simulator/cases/import_data.go b/tools/pd-simulator/simulator/cases/import_data.go index 0e7f7770a48..6cf3b79a736 100644 --- a/tools/pd-simulator/simulator/cases/import_data.go +++ b/tools/pd-simulator/simulator/cases/import_data.go @@ -78,7 +78,7 @@ func newImportData() *Case { checkCount := uint64(0) var newRegionCount [][3]int var allRegionCount [][3]int - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { leaderDist := make(map[uint64]int) peerDist := make(map[uint64]int) leaderTotal := 0 diff --git a/tools/pd-simulator/simulator/cases/makeup_down_replica.go b/tools/pd-simulator/simulator/cases/makeup_down_replica.go index 57eb2dd1f53..86c9b4cac1d 100644 --- a/tools/pd-simulator/simulator/cases/makeup_down_replica.go +++ b/tools/pd-simulator/simulator/cases/makeup_down_replica.go @@ -64,7 +64,7 @@ func newMakeupDownReplicas() *Case { } simCase.Events = []EventDescriptor{e} - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { sum := 0 regionCounts := make([]int, 0, storeNum) for i := 1; i <= storeNum; i++ { diff --git a/tools/pd-simulator/simulator/cases/region_merge.go b/tools/pd-simulator/simulator/cases/region_merge.go index 501803d439e..3d5d57f804f 100644 --- a/tools/pd-simulator/simulator/cases/region_merge.go +++ b/tools/pd-simulator/simulator/cases/region_merge.go @@ -54,7 +54,7 @@ func newRegionMerge() *Case { // Checker description threshold := 0.05 mergeRatio := 4 // when max-merge-region-size is 20, per region will reach 40MB - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ 
[]info.StoreStats) bool { sum := 0 regionCounts := make([]int, 0, storeNum) for i := 1; i <= storeNum; i++ { diff --git a/tools/pd-simulator/simulator/cases/region_split.go b/tools/pd-simulator/simulator/cases/region_split.go index 6a69386cb6b..b85cd319494 100644 --- a/tools/pd-simulator/simulator/cases/region_split.go +++ b/tools/pd-simulator/simulator/cases/region_split.go @@ -48,7 +48,7 @@ func newRegionSplit() *Case { simCase.RegionSplitKeys = 10000 // Events description e := &WriteFlowOnSpotDescriptor{} - e.Step = func(tick int64) map[string]int64 { + e.Step = func(int64) map[string]int64 { return map[string]int64{ "foobar": 8 * units.MiB, } @@ -56,7 +56,7 @@ func newRegionSplit() *Case { simCase.Events = []EventDescriptor{e} // Checker description - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := true regionCounts := make([]int, 0, storeNum) for i := 1; i <= storeNum; i++ { diff --git a/tools/pd-simulator/simulator/client.go b/tools/pd-simulator/simulator/client.go index 81453307afa..808c991e97f 100644 --- a/tools/pd-simulator/simulator/client.go +++ b/tools/pd-simulator/simulator/client.go @@ -380,7 +380,7 @@ func (c *client) StoreHeartbeat(ctx context.Context, stats *pdpb.StoreStats) err return nil } -func (c *client) RegionHeartbeat(ctx context.Context, region *core.RegionInfo) error { +func (c *client) RegionHeartbeat(_ context.Context, region *core.RegionInfo) error { c.reportRegionHeartbeatCh <- region return nil } diff --git a/tools/pd-simulator/simulator/task.go b/tools/pd-simulator/simulator/task.go index b1c609b503d..a19854b53ba 100644 --- a/tools/pd-simulator/simulator/task.go +++ b/tools/pd-simulator/simulator/task.go @@ -261,7 +261,7 @@ type transferLeader struct { toPeers []*metapb.Peer } -func (t *transferLeader) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (t *transferLeader) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { isFinished = true toPeer := t.toPeers[0] // TODO: Support selection logic if peer := region.GetPeer(toPeer.GetId()); peer == nil || peer.GetRole() != toPeer.GetRole() || core.IsLearner(peer) { @@ -313,7 +313,7 @@ type promoteLearner struct { peer *metapb.Peer } -func (pl *promoteLearner) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (pl *promoteLearner) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { isFinished = true peer := region.GetPeer(pl.peer.GetId()) opts := checkAndCreateChangePeerOption(region, peer, metapb.PeerRole_Learner, metapb.PeerRole_Voter) @@ -327,7 +327,7 @@ type demoteVoter struct { peer *metapb.Peer } -func (dv *demoteVoter) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (dv *demoteVoter) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { isFinished = true peer := region.GetPeer(dv.peer.GetId()) opts := checkAndCreateChangePeerOption(region, peer, metapb.PeerRole_Voter, metapb.PeerRole_Learner) @@ -342,7 +342,7 @@ type changePeerV2Enter struct { demoteVoters []*metapb.Peer } -func (ce *changePeerV2Enter) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (ce *changePeerV2Enter) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, 
isFinished bool) { isFinished = true var opts []core.RegionCreateOption for _, pl := range ce.promoteLearners { @@ -367,7 +367,7 @@ func (ce *changePeerV2Enter) tick(engine *RaftEngine, region *core.RegionInfo) ( type changePeerV2Leave struct{} -func (cl *changePeerV2Leave) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (*changePeerV2Leave) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { isFinished = true var opts []core.RegionCreateOption for _, peer := range region.GetPeers() { diff --git a/tools/pd-tso-bench/main.go b/tools/pd-tso-bench/main.go index 236e78c7808..b4101bda270 100644 --- a/tools/pd-tso-bench/main.go +++ b/tools/pd-tso-bench/main.go @@ -382,10 +382,10 @@ func reqWorker(ctx context.Context, pdClients []pd.Client, clientIdx int, durCh i := 0 for ; i < maxRetryTime; i++ { + var ticker *time.Ticker if *maxTSOSendIntervalMilliseconds > 0 { sleepBeforeGetTS := time.Duration(rand.Intn(*maxTSOSendIntervalMilliseconds)) * time.Millisecond - ticker := time.NewTicker(sleepBeforeGetTS) - defer ticker.Stop() + ticker = time.NewTicker(sleepBeforeGetTS) select { case <-reqCtx.Done(): case <-ticker.C: @@ -394,9 +394,11 @@ func reqWorker(ctx context.Context, pdClients []pd.Client, clientIdx int, durCh } _, _, err = pdCli.GetLocalTS(reqCtx, *dcLocation) if errors.Cause(err) == context.Canceled { + ticker.Stop() return } if err == nil { + ticker.Stop() break } log.Error(fmt.Sprintf("%v", err)) diff --git a/tools/pd-ut/README.md b/tools/pd-ut/README.md new file mode 100644 index 00000000000..77b59bea4f7 --- /dev/null +++ b/tools/pd-ut/README.md @@ -0,0 +1,66 @@ +# pd-ut + +pd-ut is a tool to run unit tests for PD. + +## Build + +1. [Go](https://golang.org/) version 1.21 or later +2. In the root directory of the [PD project](https://github.com/tikv/pd), run `make pd-ut` to compile and generate `bin/pd-ut` + +## Usage + +This section describes how to use the pd-ut tool. + +### Run all tests +```shell +make ut +``` + + +### Run tests with pd-ut + +- Run `make failpoint-enable` before running the tests. +- After running the tests, run `make failpoint-disable` and `make clean-test` to disable the failpoints and clean up the environment. + +#### Flags and usage + +```shell +// run all tests +pd-ut + +// show usage +pd-ut -h + +// list all packages +pd-ut list + +// list test cases of a single package +pd-ut list $package + +// list test cases that match a pattern +pd-ut list $package 'r:$regex' + +// run all tests +pd-ut run + +// run all test cases of a single package +pd-ut run $package + +// run test cases of a single package +pd-ut run $package $test + +// run test cases that match a pattern +pd-ut run $package 'r:$regex' + +// build all test packages +pd-ut build + +// build a test package +pd-ut build xxx + +// write a JUnit report file +pd-ut run --junitfile xxx + +// test with the race detector enabled +pd-ut run --race +``` diff --git a/tools/pd-ut/ut.go b/tools/pd-ut/ut.go new file mode 100644 index 00000000000..69a83f007b6 --- /dev/null +++ b/tools/pd-ut/ut.go @@ -0,0 +1,803 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "log" + "math/rand" + "os" + "os/exec" + "path" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + // Set the correct GOMAXPROCS value when running inside Docker. + _ "go.uber.org/automaxprocs" +) + +func usage() bool { + msg := `// run all tests +pd-ut + +// show usage +pd-ut -h + +// list all packages +pd-ut list + +// list test cases of a single package +pd-ut list $package + +// list test cases that match a pattern +pd-ut list $package 'r:$regex' + +// run all tests +pd-ut run + +// run all test cases of a single package +pd-ut run $package + +// run test cases of a single package +pd-ut run $package $test + +// run test cases that match a pattern +pd-ut run $package 'r:$regex' + +// build all test packages +pd-ut build + +// build a test package +pd-ut build xxx + +// write a JUnit report file +pd-ut run --junitfile xxx + +// test with the race detector enabled +pd-ut run --race` + + fmt.Println(msg) + return true +} + +const modulePath = "github.com/tikv/pd" + +var ( + // runtime + p int + buildParallel int + workDir string + // arguments + race bool + junitFile string +) + +func main() { + race = handleFlag("--race") + junitFile = stripFlag("--junitfile") + + // Get the correct CPU count when running inside Docker. + p = runtime.GOMAXPROCS(0) + // We use 2 * p for `go build` to make it faster. + buildParallel = p * 2 + var err error + workDir, err = os.Getwd() + if err != nil { + fmt.Println("os.Getwd() error", err) + } + + var isSucceed bool + // run all tests + if len(os.Args) == 1 { + isSucceed = cmdRun() + } + + if len(os.Args) >= 2 { + switch os.Args[1] { + case "list": + isSucceed = cmdList(os.Args[2:]...) + case "build": + isSucceed = cmdBuild(os.Args[2:]...) + case "run": + isSucceed = cmdRun(os.Args[2:]...)
+ default: + isSucceed = usage() + } + } + if !isSucceed { + os.Exit(1) + } +} + +func cmdList(args ...string) bool { + pkgs, err := listPackages() + if err != nil { + log.Println("list package error", err) + return false + } + + // list all packages + if len(args) == 0 { + for _, pkg := range pkgs { + fmt.Println(pkg) + } + return false + } + + // list test cases of a single package + if len(args) == 1 || len(args) == 2 { + pkg := args[0] + pkgs = filter(pkgs, func(s string) bool { return s == pkg }) + if len(pkgs) != 1 { + fmt.Println("package does not exist", pkg) + return false + } + + err := buildTestBinary(pkg) + if err != nil { + log.Println("build package error", pkg, err) + return false + } + exist, err := testBinaryExist(pkg) + if err != nil { + log.Println("check test binary existence error", err) + return false + } + if !exist { + fmt.Println("no test case in", pkg) + return false + } + + res := listTestCases(pkg, nil) + + if len(args) == 2 { + res, err = filterTestCases(res, args[1]) + if err != nil { + log.Println("filter test cases error", err) + return false + } + } + + for _, x := range res { + fmt.Println(x.test) + } + } + return true +} + +func cmdBuild(args ...string) bool { + pkgs, err := listPackages() + if err != nil { + log.Println("list package error", err) + return false + } + + // build all packages + if len(args) == 0 { + err := buildTestBinaryMulti(pkgs) + if err != nil { + fmt.Println("build package error", pkgs, err) + return false + } + return true + } + + // build test binary of a single package + if len(args) >= 1 { + pkg := args[0] + err := buildTestBinary(pkg) + if err != nil { + log.Println("build package error", pkg, err) + return false + } + } + return true +} + +func cmdRun(args ...string) bool { + var err error + pkgs, err := listPackages() + if err != nil { + fmt.Println("list packages error", err) + return false + } + tasks := make([]task, 0, 5000) + start := time.Now() + // run all tests + if len(args) == 0 { + err := buildTestBinaryMulti(pkgs) + if err != nil { + fmt.Println("build package error", pkgs, err) + return false + } + + for _, pkg := range pkgs { + exist, err := testBinaryExist(pkg) + if err != nil { + fmt.Println("check test binary existence error", err) + return false + } + if !exist { + fmt.Println("no test case in", pkg) + continue + } + + tasks = listTestCases(pkg, tasks) + } + } + + // run tests for a single package + if len(args) == 1 { + pkg := args[0] + err := buildTestBinary(pkg) + if err != nil { + log.Println("build package error", pkg, err) + return false + } + exist, err := testBinaryExist(pkg) + if err != nil { + log.Println("check test binary existence error", err) + return false + } + + if !exist { + fmt.Println("no test case in", pkg) + return false + } + tasks = listTestCases(pkg, tasks) + } + + // run a single test + if len(args) == 2 { + pkg := args[0] + err := buildTestBinary(pkg) + if err != nil { + log.Println("build package error", pkg, err) + return false + } + exist, err := testBinaryExist(pkg) + if err != nil { + log.Println("check test binary existence error", err) + return false + } + if !exist { + fmt.Println("no test case in", pkg) + return false + } + + tasks = listTestCases(pkg, tasks) + tasks, err = filterTestCases(tasks, args[1]) + if err != nil { + log.Println("filter test cases error", err) + return false + } + } + + fmt.Printf("build tasks finished, parallelism=%d, count=%d, takes=%v\n", buildParallel, len(tasks), time.Since(start)) + + taskCh := make(chan task, 100) + works := make([]numa, p) + 
var wg sync.WaitGroup + for i := 0; i < p; i++ { + wg.Add(1) + go works[i].worker(&wg, taskCh) + } + + shuffle(tasks) + + start = time.Now() + for _, task := range tasks { + taskCh <- task + } + close(taskCh) + wg.Wait() + fmt.Println("running all tasks took", time.Since(start)) + + if junitFile != "" { + out := collectTestResults(works) + f, err := os.Create(junitFile) + if err != nil { + fmt.Println("create junit file failed:", err) + return false + } + if err := write(f, out); err != nil { + fmt.Println("write junit file error:", err) + return false + } + } + + for _, work := range works { + if work.Fail { + return false + } + } + return true +} + +// stripFlag strips the '--flag xxx' from the command line os.Args +// Example of the os.Args change: +// Before: ut run pkg TestXXX --junitfile yyy +// After: ut run pkg TestXXX +// The value of the flag is returned. +func stripFlag(flag string) string { + var res string + tmp := os.Args[:0] + // Iterate up to the flag + var i int + for ; i < len(os.Args); i++ { + if os.Args[i] == flag { + i++ + break + } + tmp = append(tmp, os.Args[i]) + } + // Handle the flag + if i < len(os.Args) { + res = os.Args[i] + i++ + } + // Iterate over the remaining flags + for ; i < len(os.Args); i++ { + tmp = append(tmp, os.Args[i]) + } + + os.Args = tmp + return res +} + +func handleFlag(f string) (found bool) { + tmp := os.Args[:0] + for i := 0; i < len(os.Args); i++ { + if os.Args[i] == f { + found = true + continue + } + tmp = append(tmp, os.Args[i]) + } + os.Args = tmp + return +} + +type task struct { + pkg string + test string +} + +func (t *task) String() string { + return t.pkg + " " + t.test +} + +func listTestCases(pkg string, tasks []task) []task { + newCases := listNewTestCases(pkg) + for _, c := range newCases { + tasks = append(tasks, task{pkg, c}) + } + + return tasks +} + +func filterTestCases(tasks []task, arg1 string) ([]task, error) { + if strings.HasPrefix(arg1, "r:") { + r, err := regexp.Compile(arg1[2:]) + if err != nil { + return nil, err + } + tmp := tasks[:0] + for _, task := range tasks { + if r.MatchString(task.test) { + tmp = append(tmp, task) + } + } + return tmp, nil + } + tmp := tasks[:0] + for _, task := range tasks { + if strings.Contains(task.test, arg1) { + tmp = append(tmp, task) + } + } + return tmp, nil +} + +func listPackages() ([]string, error) { + cmd := exec.Command("go", "list", "./...") + ss, err := cmdToLines(cmd) + if err != nil { + return nil, withTrace(err) + } + + ret := ss[:0] + for _, s := range ss { + if !strings.HasPrefix(s, modulePath) { + continue + } + pkg := s[len(modulePath)+1:] + if skipDIR(pkg) { + continue + } + ret = append(ret, pkg) + } + return ret, nil +} + +type numa struct { + Fail bool + results []testResult +} + +func (n *numa) worker(wg *sync.WaitGroup, ch chan task) { + defer wg.Done() + for t := range ch { + res := n.runTestCase(t.pkg, t.test) + if res.Failure != nil { + fmt.Println("[FAIL] ", t.pkg, t.test) + fmt.Fprintf(os.Stderr, "err=%s\n%s", res.err.Error(), res.Failure.Contents) + n.Fail = true + } + n.results = append(n.results, res) + } +} + +type testResult struct { + JUnitTestCase + d time.Duration + err error +} + +func (n *numa) runTestCase(pkg string, fn string) testResult { + res := testResult{ + JUnitTestCase: JUnitTestCase{ + ClassName: path.Join(modulePath, pkg), + Name: fn, + }, + } + + var buf bytes.Buffer + var err error + var start time.Time + for i := 0; i < 3; i++ { + cmd := n.testCommand(pkg, fn) + cmd.Dir = path.Join(workDir, pkg) + // Combine the test case output, so the run result for
failed cases can be displayed. + cmd.Stdout = &buf + cmd.Stderr = &buf + + start = time.Now() + err = cmd.Run() + if err != nil { + var exitError *exec.ExitError + if errors.As(err, &exitError) { + // Retry up to 3 times to work around these flaky errors: + switch err.Error() { + case "signal: segmentation fault (core dumped)": + buf.Reset() + continue + case "signal: trace/breakpoint trap (core dumped)": + buf.Reset() + continue + } + if strings.Contains(buf.String(), "panic during panic") { + buf.Reset() + continue + } + } + } + break + } + if err != nil { + res.Failure = &JUnitFailure{ + Message: "Failed", + Contents: buf.String(), + } + res.err = err + } + + res.d = time.Since(start) + res.Time = formatDurationAsSeconds(res.d) + return res +} + +func collectTestResults(workers []numa) JUnitTestSuites { + version := goVersion() + // pkg => test cases + pkgs := make(map[string][]JUnitTestCase) + durations := make(map[string]time.Duration) + + // The test results in workers are shuffled, so group them by package here + for _, n := range workers { + for _, res := range n.results { + cases, ok := pkgs[res.ClassName] + if !ok { + cases = make([]JUnitTestCase, 0, 10) + } + cases = append(cases, res.JUnitTestCase) + pkgs[res.ClassName] = cases + durations[res.ClassName] += res.d + } + } + + suites := JUnitTestSuites{} + // Turn every package's results into a suite. + for pkg, cases := range pkgs { + suite := JUnitTestSuite{ + Tests: len(cases), + Failures: failureCases(cases), + Time: formatDurationAsSeconds(durations[pkg]), + Name: pkg, + Properties: packageProperties(version), + TestCases: cases, + } + suites.Suites = append(suites.Suites, suite) + } + return suites +} + +func failureCases(input []JUnitTestCase) int { + sum := 0 + for _, v := range input { + if v.Failure != nil { + sum++ + } + } + return sum +} + +func (*numa) testCommand(pkg string, fn string) *exec.Cmd { + args := make([]string, 0, 10) + exe := "./" + testFileName(pkg) + args = append(args, "-test.cpu", "1") + if !race { + args = append(args, []string{"-test.timeout", "2m"}...) + } else { + // Tests take longer when the race detector is enabled, so use a larger timeout. + args = append(args, []string{"-test.timeout", "5m"}...) + } + + // core.test -test.run TestClusteredPrefixColum + args = append(args, "-test.run", "^"+fn+"$") + + return exec.Command(exe, args...) +} + +func skipDIR(pkg string) bool { + skipDir := []string{"tests", "bin", "cmd", "tools"} + for _, ignore := range skipDir { + if strings.HasPrefix(pkg, ignore) { + return true + } + } + return false +} + +// buildTestBinaryMulti is much faster than building the test packages one by one. +func buildTestBinaryMulti(pkgs []string) error { + // go test --exec=xprog --tags=tso_function_test,deadlock -vet=off --count=0 $(pkgs) + xprogPath := path.Join(workDir, "bin/xprog") + packages := make([]string, 0, len(pkgs)) + for _, pkg := range pkgs { + packages = append(packages, path.Join(modulePath, pkg)) + } + + p := strconv.Itoa(buildParallel) + cmd := exec.Command("go", "test", "-p", p, "--exec", xprogPath, "-vet", "off", "--tags=tso_function_test,deadlock") + cmd.Args = append(cmd.Args, packages...)
+// buildTestBinaryMulti is much faster than building the test packages one by
+// one. It runs (approximately):
+//
+//	go test -p <buildParallel> --exec=xprog --tags=tso_function_test,deadlock -vet off <pkgs>
+func buildTestBinaryMulti(pkgs []string) error {
+	xprogPath := path.Join(workDir, "bin/xprog")
+	packages := make([]string, 0, len(pkgs))
+	for _, pkg := range pkgs {
+		packages = append(packages, path.Join(modulePath, pkg))
+	}
+
+	p := strconv.Itoa(buildParallel)
+	cmd := exec.Command("go", "test", "-p", p, "--exec", xprogPath, "-vet", "off", "--tags=tso_function_test,deadlock")
+	cmd.Args = append(cmd.Args, packages...)
+	cmd.Dir = workDir
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return withTrace(err)
+	}
+	return nil
+}
+
+func buildTestBinary(pkg string) error {
+	//nolint:gosec
+	cmd := exec.Command("go", "test", "-c", "-vet", "off", "--tags=tso_function_test,deadlock", "-o", testFileName(pkg), "-v")
+	if race {
+		cmd.Args = append(cmd.Args, "-race")
+	}
+	cmd.Dir = path.Join(workDir, pkg)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return withTrace(err)
+	}
+	return nil
+}
+
+func testBinaryExist(pkg string) (bool, error) {
+	_, err := os.Stat(testFileFullPath(pkg))
+	if err != nil {
+		var pathError *os.PathError
+		if errors.As(err, &pathError) {
+			// The binary has not been built yet; this is not an error.
+			return false, nil
+		}
+	}
+	return true, withTrace(err)
+}
+
+func testFileName(pkg string) string {
+	_, file := path.Split(pkg)
+	return file + ".test.bin"
+}
+
+func testFileFullPath(pkg string) string {
+	return path.Join(workDir, pkg, testFileName(pkg))
+}
+
+func listNewTestCases(pkg string) []string {
+	exe := "./" + testFileName(pkg)
+
+	// e.g. core.test -test.list Test
+	cmd := exec.Command(exe, "-test.list", "Test")
+	cmd.Dir = path.Join(workDir, pkg)
+	var buf bytes.Buffer
+	cmd.Stdout = &buf
+	err := cmd.Run()
+	res := strings.Split(buf.String(), "\n")
+	if err != nil && len(res) == 0 {
+		fmt.Println("err ==", err)
+	}
+	return filter(res, func(s string) bool {
+		return strings.HasPrefix(s, "Test") && s != "TestT" && s != "TestBenchDaily"
+	})
+}
+
+func cmdToLines(cmd *exec.Cmd) ([]string, error) {
+	res, err := cmd.Output()
+	if err != nil {
+		return nil, withTrace(err)
+	}
+	ss := bytes.Split(res, []byte{'\n'})
+	ret := make([]string, len(ss))
+	for i, s := range ss {
+		ret[i] = string(s)
+	}
+	return ret, nil
+}
+
+func filter(input []string, f func(string) bool) []string {
+	ret := input[:0]
+	for _, s := range input {
+		if f(s) {
+			ret = append(ret, s)
+		}
+	}
+	return ret
+}
+
+func shuffle(tasks []task) {
+	for i := 0; i < len(tasks); i++ {
+		pos := rand.Intn(len(tasks))
+		tasks[i], tasks[pos] = tasks[pos], tasks[i]
+	}
+}
+
+type errWithStack struct {
+	err error
+	buf []byte
+}
+
+func (e *errWithStack) Error() string {
+	return e.err.Error() + "\n" + string(e.buf)
+}
+
+// withTrace wraps err with the current goroutine's stack trace; it is a
+// no-op for nil errors and for errors that already carry a stack.
+func withTrace(err error) error {
+	if err == nil {
+		return err
+	}
+	var errStack *errWithStack
+	if errors.As(err, &errStack) {
+		return err
+	}
+	var stack [4096]byte
+	sz := runtime.Stack(stack[:], false)
+	return &errWithStack{err, stack[:sz]}
+}
+
+func formatDurationAsSeconds(d time.Duration) string {
+	return fmt.Sprintf("%f", d.Seconds())
+}
+
+func packageProperties(goVersion string) []JUnitProperty {
+	return []JUnitProperty{
+		{Name: "go.version", Value: goVersion},
+	}
+}
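+
+// With the types declared below, `write` emits standard JUnit XML along these
+// lines (values illustrative):
+//
+//	<testsuites>
+//		<testsuite tests="10" failures="1" time="12.3" name="server/api">
+//			<properties>
+//				<property name="go.version" value="go1.21"></property>
+//			</properties>
+//			<testcase classname="github.com/tikv/pd/server/api" name="TestStatus" time="0.1"></testcase>
+//		</testsuite>
+//	</testsuites>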
+// goVersion returns the version as reported by the go binary in PATH. This
+// version will not be the same as runtime.Version, which is always the
+// version of go used to build this binary.
+//
+// To skip the os/exec call set the GOVERSION environment variable to the
+// desired value.
+func goVersion() string {
+	if version, ok := os.LookupEnv("GOVERSION"); ok {
+		return version
+	}
+	cmd := exec.Command("go", "version")
+	out, err := cmd.Output()
+	if err != nil {
+		return "unknown"
+	}
+	return strings.TrimPrefix(strings.TrimSpace(string(out)), "go version ")
+}
+
+func write(out io.Writer, suites JUnitTestSuites) error {
+	doc, err := xml.MarshalIndent(suites, "", "\t")
+	if err != nil {
+		return err
+	}
+	_, err = out.Write([]byte(xml.Header))
+	if err != nil {
+		return err
+	}
+	_, err = out.Write(doc)
+	return err
+}
+
+// JUnitTestSuites is a collection of JUnit test suites.
+type JUnitTestSuites struct {
+	XMLName xml.Name `xml:"testsuites"`
+	Suites  []JUnitTestSuite
+}
+
+// JUnitTestSuite is a single JUnit test suite which may contain many
+// testcases.
+type JUnitTestSuite struct {
+	XMLName    xml.Name        `xml:"testsuite"`
+	Tests      int             `xml:"tests,attr"`
+	Failures   int             `xml:"failures,attr"`
+	Time       string          `xml:"time,attr"`
+	Name       string          `xml:"name,attr"`
+	Properties []JUnitProperty `xml:"properties>property,omitempty"`
+	TestCases  []JUnitTestCase
+}
+
+// JUnitTestCase is a single test case with its result.
+type JUnitTestCase struct {
+	XMLName     xml.Name          `xml:"testcase"`
+	ClassName   string            `xml:"classname,attr"`
+	Name        string            `xml:"name,attr"`
+	Time        string            `xml:"time,attr"`
+	SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"`
+	Failure     *JUnitFailure     `xml:"failure,omitempty"`
+}
+
+// JUnitSkipMessage contains the reason why a testcase was skipped.
+type JUnitSkipMessage struct {
+	Message string `xml:"message,attr"`
+}
+
+// JUnitProperty represents a key/value pair used to define properties.
+type JUnitProperty struct {
+	Name  string `xml:"name,attr"`
+	Value string `xml:"value,attr"`
+}
+
+// JUnitFailure contains data related to a failed test.
+type JUnitFailure struct {
+	Message  string `xml:"message,attr"`
+	Type     string `xml:"type,attr"`
+	Contents string `xml:",chardata"`
+}
diff --git a/tools/pd-ut/xprog.go b/tools/pd-ut/xprog.go
new file mode 100644
index 00000000000..cf3e9b295e2
--- /dev/null
+++ b/tools/pd-ut/xprog.go
@@ -0,0 +1,119 @@
+// Copyright 2024 TiKV Project Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build xprog
+// +build xprog
+
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
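+
+// Because of the `xprog` build tag above, this file is excluded from regular
+// builds and is only compiled when the standalone xprog helper binary is
+// built with that tag enabled.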
+func main() {
+	// See https://github.com/golang/go/issues/15513#issuecomment-773994959
+	// go test --exec=xprog ./...
+	// The command line args look like:
+	// '$CWD/xprog /tmp/go-build2662369829/b1382/aggfuncs.test -test.paniconexit0 -test.timeout=10m0s'
+	// This program moves the test binary /tmp/go-build2662369829/b1382/aggfuncs.test to someplace else for later use.
+
+	// Extract the current work directory
+	cwd := os.Args[0]
+	cwd = cwd[:len(cwd)-len("bin/xprog")]
+
+	testBinaryPath := os.Args[1]
+	dir, _ := filepath.Split(testBinaryPath)
+
+	// Extract the package info from /tmp/go-build2662369829/b1382/importcfg.link
+	pkg := getPackageInfo(dir)
+
+	const prefix = "github.com/tikv/pd/"
+	if !strings.HasPrefix(pkg, prefix) {
+		os.Exit(-3)
+	}
+
+	// github.com/tikv/pd/server/api/api.test => server/api/api
+	pkg = pkg[len(prefix) : len(pkg)-len(".test")]
+
+	_, file := filepath.Split(pkg)
+
+	// The path of the destination file looks like $CWD/server/api/api.test.bin
+	newName := filepath.Join(cwd, pkg, file+".test.bin")
+
+	if err1 := os.Rename(testBinaryPath, newName); err1 != nil {
+		// Rename failed; fall back to copying for errors like "invalid cross-device link".
+		err1 = MoveFile(testBinaryPath, newName)
+		if err1 != nil {
+			os.Exit(-4)
+		}
+	}
+}
+
+func getPackageInfo(dir string) string {
+	// Read the /tmp/go-build2662369829/b1382/importcfg.link file to get the package information
+	f, err := os.Open(filepath.Join(dir, "importcfg.link"))
+	if err != nil {
+		os.Exit(-1)
+	}
+	defer f.Close()
+
+	r := bufio.NewReader(f)
+	line, _, err := r.ReadLine()
+	if err != nil {
+		os.Exit(-2)
+	}
+	start := strings.IndexByte(string(line), ' ')
+	end := strings.IndexByte(string(line), '=')
+	pkg := string(line[start+1 : end])
+	return pkg
+}
+
+// MoveFile copies srcPath to dstPath, preserves the file mode, and removes
+// the original; it is the fallback when os.Rename cannot cross filesystems.
+func MoveFile(srcPath, dstPath string) error {
+	inputFile, err := os.Open(srcPath)
+	if err != nil {
+		return fmt.Errorf("couldn't open source file: %s", err)
+	}
+	outputFile, err := os.Create(dstPath)
+	if err != nil {
+		inputFile.Close()
+		return fmt.Errorf("couldn't open dst file: %s", err)
+	}
+	defer outputFile.Close()
+	_, err = io.Copy(outputFile, inputFile)
+	inputFile.Close()
+	if err != nil {
+		return fmt.Errorf("writing to output file failed: %s", err)
+	}
+
+	// Handle the permissions
+	si, err := os.Stat(srcPath)
+	if err != nil {
+		return fmt.Errorf("stat error: %s", err)
+	}
+	err = os.Chmod(dstPath, si.Mode())
+	if err != nil {
+		return fmt.Errorf("chmod error: %s", err)
+	}
+
+	// The copy was successful, so now delete the original file
+	err = os.Remove(srcPath)
+	if err != nil {
+		return fmt.Errorf("failed removing original file: %s", err)
+	}
+	return nil
+}
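+
+// End-to-end flow (sketch): pd-ut builds every package's tests via
+// `go test --exec=<workDir>/bin/xprog`, xprog relocates each compiled binary
+// next to its package as <pkg>.test.bin, and pd-ut's workers then execute the
+// listed Test* functions from those relocated binaries in parallel.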