From 9c48e6bd845aaf00055490f414348dc904032eeb Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Tue, 2 Apr 2024 18:24:59 +0800 Subject: [PATCH 1/8] improve the linter and fix some bugs Signed-off-by: Ryan Leung --- .golangci.yml | 147 ++++++++++++++++++ Makefile | 3 - client/Makefile | 2 - client/client.go | 8 +- client/keyspace_client.go | 2 +- client/mock_pd_service_discovery.go | 28 ++-- client/pd_service_discovery.go | 8 +- .../resource_group/controller/controller.go | 13 +- client/resource_group/controller/limiter.go | 3 +- client/resource_group/controller/model.go | 7 +- client/resource_group/controller/testutil.go | 2 +- client/retry/backoff_test.go | 2 +- client/testutil/check_env_dummy.go | 2 +- client/tlsutil/tlsconfig.go | 6 +- client/tso_batch_controller.go | 2 +- client/tso_dispatcher.go | 3 +- client/tso_service_discovery.go | 6 +- client/tso_stream.go | 4 +- go.mod | 6 - go.sum | 21 --- pkg/audit/audit.go | 2 +- pkg/autoscaling/calculation.go | 2 +- pkg/autoscaling/calculation_test.go | 2 +- pkg/autoscaling/prometheus_test.go | 14 +- pkg/btree/btree_generic.go | 2 +- pkg/btree/btree_generic_test.go | 2 +- pkg/core/metrics.go | 26 ++-- pkg/core/region.go | 4 +- pkg/core/store_test.go | 2 +- pkg/core/storelimit/sliding_window.go | 5 +- pkg/core/storelimit/store_limit.go | 6 +- pkg/dashboard/adapter/redirector_test.go | 4 +- pkg/dashboard/dashboard.go | 2 +- pkg/election/leadership_test.go | 10 +- pkg/encryption/key_manager_test.go | 6 +- pkg/encryption/kms.go | 2 +- pkg/errs/errs_test.go | 4 +- pkg/mcs/metastorage/server/grpc_service.go | 4 +- pkg/mcs/resourcemanager/server/config.go | 7 +- pkg/mcs/resourcemanager/server/config_test.go | 2 +- .../resourcemanager/server/grpc_service.go | 14 +- pkg/mcs/scheduling/server/apis/v1/api.go | 2 +- pkg/mcs/scheduling/server/cluster.go | 6 +- pkg/mcs/scheduling/server/config/config.go | 16 +- pkg/mcs/scheduling/server/config/watcher.go | 4 +- pkg/mcs/scheduling/server/grpc_service.go | 12 +- pkg/mcs/scheduling/server/rule/watcher.go | 18 +-- pkg/mcs/server/server.go | 2 +- pkg/mcs/tso/server/config.go | 4 +- pkg/mcs/tso/server/config_test.go | 2 +- pkg/mcs/tso/server/grpc_service.go | 8 +- pkg/mcs/tso/server/server.go | 8 +- pkg/member/participant.go | 4 +- pkg/mock/mockcluster/mockcluster.go | 2 +- pkg/mock/mockhbstream/mockhbstream.go | 4 +- pkg/mock/mockid/mockid.go | 2 +- pkg/ratelimit/controller_test.go | 4 +- pkg/replication/replication_mode_test.go | 2 +- pkg/schedule/checker/merge_checker.go | 2 +- pkg/schedule/checker/replica_checker.go | 2 +- pkg/schedule/checker/replica_strategy.go | 6 +- pkg/schedule/checker/rule_checker.go | 6 +- pkg/schedule/checker/split_checker.go | 2 +- pkg/schedule/config/config.go | 4 +- pkg/schedule/filter/candidates_test.go | 8 +- pkg/schedule/filter/filters.go | 44 +++--- pkg/schedule/filter/region_filters.go | 4 +- pkg/schedule/handler/handler.go | 8 +- pkg/schedule/labeler/labeler_test.go | 2 +- pkg/schedule/operator/create_operator.go | 4 +- pkg/schedule/operator/create_operator_test.go | 2 +- pkg/schedule/operator/operator.go | 1 + pkg/schedule/operator/operator_controller.go | 6 +- .../operator/operator_controller_test.go | 16 +- pkg/schedule/operator/operator_test.go | 75 ++++----- pkg/schedule/operator/status_tracker.go | 3 +- pkg/schedule/operator/step.go | 56 +++---- pkg/schedule/schedulers/balance_leader.go | 6 +- pkg/schedule/schedulers/balance_region.go | 2 +- pkg/schedule/schedulers/balance_test.go | 4 +- pkg/schedule/schedulers/balance_witness.go | 4 +- 
pkg/schedule/schedulers/base_scheduler.go | 14 +- pkg/schedule/schedulers/evict_leader.go | 12 +- pkg/schedule/schedulers/evict_slow_store.go | 8 +- pkg/schedule/schedulers/evict_slow_trend.go | 10 +- pkg/schedule/schedulers/grant_hot_region.go | 10 +- pkg/schedule/schedulers/grant_leader.go | 12 +- pkg/schedule/schedulers/hot_region.go | 12 +- pkg/schedule/schedulers/hot_region_config.go | 2 +- pkg/schedule/schedulers/hot_region_test.go | 6 +- pkg/schedule/schedulers/hot_region_v2.go | 8 +- pkg/schedule/schedulers/init.go | 50 +++--- pkg/schedule/schedulers/label.go | 6 +- pkg/schedule/schedulers/random_merge.go | 8 +- pkg/schedule/schedulers/scatter_range.go | 6 +- pkg/schedule/schedulers/scheduler.go | 2 +- pkg/schedule/schedulers/shuffle_hot_region.go | 6 +- pkg/schedule/schedulers/shuffle_leader.go | 6 +- pkg/schedule/schedulers/shuffle_region.go | 6 +- .../schedulers/shuffle_region_config.go | 2 +- pkg/schedule/schedulers/split_bucket.go | 6 +- .../schedulers/transfer_witness_leader.go | 14 +- pkg/schedule/splitter/region_splitter.go | 3 +- pkg/schedule/splitter/region_splitter_test.go | 2 +- pkg/statistics/buckets/hot_bucket_task.go | 4 +- pkg/statistics/collector.go | 10 +- pkg/statistics/hot_cache_task.go | 2 +- pkg/statistics/hot_peer_cache.go | 26 ++-- pkg/statistics/slow_stat.go | 4 +- pkg/statistics/store_collection.go | 6 +- pkg/statistics/store_collection_test.go | 2 +- pkg/storage/endpoint/keyspace.go | 10 +- pkg/storage/endpoint/meta.go | 4 +- pkg/storage/endpoint/rule.go | 12 +- pkg/storage/endpoint/tso_keyspace_group.go | 6 +- pkg/syncer/client_test.go | 6 +- pkg/tso/global_allocator.go | 4 +- pkg/tso/keyspace_group_manager.go | 12 +- pkg/tso/local_allocator.go | 4 +- pkg/tso/tso.go | 4 +- pkg/utils/apiutil/apiutil.go | 5 +- pkg/utils/etcdutil/etcdutil_test.go | 14 +- pkg/utils/etcdutil/health_checker.go | 4 +- pkg/utils/logutil/log.go | 2 +- pkg/utils/metricutil/metricutil_test.go | 2 +- pkg/utils/tempurl/check_env_dummy.go | 2 +- pkg/utils/testutil/api_check.go | 3 +- pkg/utils/tsoutil/tso_dispatcher.go | 8 +- pkg/utils/tsoutil/tso_proto_factory.go | 4 +- pkg/utils/tsoutil/tso_request.go | 6 +- pkg/window/counter_test.go | 2 +- pkg/window/policy_test.go | 2 +- plugin/scheduler_example/evict_leader.go | 14 +- server/api/admin.go | 2 +- server/api/cluster.go | 4 +- server/api/config.go | 10 +- server/api/health.go | 4 +- server/api/hot_status.go | 2 +- server/api/member.go | 6 +- server/api/member_test.go | 4 +- server/api/operator.go | 2 +- server/api/plugin_disable.go | 6 +- server/api/pprof.go | 18 +-- server/api/region.go | 2 +- server/api/region_test.go | 24 +-- server/api/router.go | 4 +- server/api/service_gc_safepoint.go | 2 +- server/api/service_middleware.go | 2 +- server/api/stats_test.go | 2 +- server/api/status.go | 2 +- server/api/store_test.go | 12 +- server/api/trend.go | 6 +- server/api/version.go | 2 +- server/cluster/cluster.go | 4 +- server/cluster/cluster_test.go | 8 +- server/cluster/cluster_worker.go | 12 +- server/cluster/scheduling_controller.go | 8 +- server/config/persist_options.go | 4 +- server/forward.go | 10 +- server/grpc_service.go | 42 ++--- server/handler.go | 9 -- server/keyspace_service.go | 2 +- server/server.go | 6 +- server/testutil.go | 2 +- tests/cluster.go | 8 +- tests/dashboard/service_test.go | 4 +- tests/integrations/Makefile | 2 - tests/integrations/client/client_test.go | 55 ++++--- tests/integrations/client/client_tls_test.go | 6 +- tests/integrations/client/gc_client_test.go | 1 + 
.../integrations/client/global_config_test.go | 62 ++++---- tests/integrations/client/http_client_test.go | 22 +-- .../mcs/discovery/register_test.go | 2 +- .../mcs/keyspace/tso_keyspace_group_test.go | 8 +- .../resourcemanager/resource_manager_test.go | 27 ++-- tests/integrations/mcs/tso/api_test.go | 2 +- tests/integrations/mcs/tso/proxy_test.go | 30 ++-- tests/integrations/realcluster/Makefile | 2 - tests/registry/registry_test.go | 6 +- tests/server/api/api_test.go | 10 +- tests/server/api/checker_test.go | 12 +- tests/server/api/operator_test.go | 30 +--- tests/server/api/region_test.go | 18 +-- tests/server/api/rule_test.go | 48 +++--- tests/server/api/scheduler_test.go | 44 +++--- tests/server/cluster/cluster_test.go | 6 +- tests/server/config/config_test.go | 22 +-- tests/server/join/join_test.go | 4 +- tests/server/keyspace/keyspace_test.go | 2 +- tests/server/member/member_test.go | 2 +- .../region_syncer/region_syncer_test.go | 8 +- tests/server/server_test.go | 2 +- .../server/storage/hot_region_storage_test.go | 6 +- tests/server/tso/allocator_test.go | 2 +- tests/server/tso/consistency_test.go | 2 +- tests/server/tso/global_tso_test.go | 2 +- tests/server/watch/leader_watch_test.go | 4 +- tests/testutil.go | 6 +- tests/tso_cluster.go | 2 +- tools.go | 1 - tools/Makefile | 2 - tools/pd-analysis/analysis/parse_log.go | 16 +- tools/pd-analysis/analysis/parse_log_test.go | 2 +- tools/pd-api-bench/cases/cases.go | 30 ++-- tools/pd-api-bench/cases/controller.go | 6 +- tools/pd-api-bench/config/config.go | 8 +- tools/pd-backup/pdbackup/backup_test.go | 6 +- tools/pd-ctl/pdctl/command/config_command.go | 30 ++-- tools/pd-ctl/pdctl/command/exit_command.go | 2 +- .../pdctl/command/gc_safepoint_command.go | 2 +- tools/pd-ctl/pdctl/command/global.go | 2 +- tools/pd-ctl/pdctl/command/health_command.go | 2 +- tools/pd-ctl/pdctl/command/hot_command.go | 2 +- tools/pd-ctl/pdctl/command/label_command.go | 2 +- tools/pd-ctl/pdctl/command/member_command.go | 6 +- tools/pd-ctl/pdctl/command/min_resolved_ts.go | 2 +- tools/pd-ctl/pdctl/command/operator.go | 1 - tools/pd-ctl/pdctl/command/ping_command.go | 2 +- tools/pd-ctl/pdctl/command/region_command.go | 4 +- tools/pd-ctl/pdctl/command/scheduler.go | 2 +- tools/pd-ctl/pdctl/command/store_command.go | 4 +- tools/pd-ctl/pdctl/command/unsafe_command.go | 2 +- tools/pd-ctl/pdctl/ctl.go | 2 +- tools/pd-ctl/tests/config/config_test.go | 32 ++-- tools/pd-ctl/tests/global_test.go | 2 +- tools/pd-ctl/tests/hot/hot_test.go | 6 +- .../tests/keyspace/keyspace_group_test.go | 12 +- tools/pd-ctl/tests/keyspace/keyspace_test.go | 2 +- tools/pd-ctl/tests/label/label_test.go | 2 +- tools/pd-ctl/tests/operator/operator_test.go | 2 +- tools/pd-ctl/tests/store/store_test.go | 2 +- tools/pd-heartbeat-bench/main.go | 4 +- .../pd-simulator/simulator/cases/add_nodes.go | 2 +- .../simulator/cases/add_nodes_dynamic.go | 2 +- .../simulator/cases/balance_leader.go | 2 +- .../simulator/cases/balance_region.go | 2 +- .../simulator/cases/delete_nodes.go | 2 +- .../cases/diagnose_label_isolation.go | 6 +- .../simulator/cases/diagnose_rule.go | 4 +- .../simulator/cases/event_inner.go | 10 +- .../pd-simulator/simulator/cases/hot_read.go | 4 +- .../pd-simulator/simulator/cases/hot_write.go | 4 +- .../simulator/cases/import_data.go | 2 +- .../simulator/cases/makeup_down_replica.go | 2 +- .../simulator/cases/region_merge.go | 2 +- .../simulator/cases/region_split.go | 4 +- tools/pd-simulator/simulator/client.go | 2 +- tools/pd-simulator/simulator/task.go | 10 +- 
tools/pd-tso-bench/main.go | 6 +- 249 files changed, 1096 insertions(+), 1023 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 0e5028634ae..283de8e96b0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -13,6 +13,7 @@ linters: - bodyclose - testifylint - gofmt + - revive disable: - errcheck linters-settings: @@ -52,3 +53,149 @@ linters-settings: rewrite-rules: - pattern: "interface{}" replacement: "any" + revive: + ignore-generated-header: false + severity: error + confidence: 0.8 + rules: + - name: atomic + severity: warning + exclude: [""] + disabled: false + - name: blank-imports + severity: warning + exclude: [""] + disabled: false + - name: confusing-naming + severity: warning + disabled: false + exclude: [""] + - name: confusing-results + severity: warning + disabled: false + exclude: [""] + - name: context-as-argument + severity: warning + disabled: false + exclude: [""] + arguments: + - allowTypesBefore: "*testing.T,*github.com/user/repo/testing.Harness" + - name: datarace + severity: warning + disabled: false + exclude: [""] + - name: defer + severity: warning + disabled: false + exclude: [""] + arguments: + - ["call-chain", "loop"] + - name: dot-imports + severity: warning + disabled: false + exclude: [""] + - name: duplicated-imports + severity: warning + disabled: false + exclude: [""] + - name: empty-block + severity: warning + disabled: false + exclude: [""] + - name: empty-lines + severity: warning + disabled: false + exclude: [""] + - name: error-return + severity: warning + disabled: false + exclude: [""] + - name: error-strings + severity: warning + disabled: false + exclude: [""] + - name: error-naming + severity: warning + disabled: false + exclude: [""] + - name: exported + severity: warning + disabled: false + exclude: [""] + arguments: + - "checkPrivateReceivers" + - "sayRepetitiveInsteadOfStutters" + - name: identical-branches + severity: warning + disabled: false + exclude: [""] + - name: if-return + severity: warning + disabled: false + exclude: [""] + - name: modifies-parameter + severity: warning + disabled: false + exclude: [""] + - name: optimize-operands-order + severity: warning + disabled: false + exclude: [""] + - name: package-comments + severity: warning + disabled: false + exclude: [""] + - name: range + severity: warning + disabled: false + exclude: [""] + - name: range-val-in-closure + severity: warning + disabled: false + exclude: [""] + - name: range-val-address + severity: warning + disabled: false + exclude: [""] + - name: receiver-naming + severity: warning + disabled: false + exclude: [""] + - name: indent-error-flow + severity: warning + disabled: false + exclude: [""] + - name: superfluous-else + severity: warning + disabled: false + exclude: [""] + - name: unnecessary-stmt + severity: warning + disabled: false + exclude: [""] + - name: unreachable-code + severity: warning + disabled: false + exclude: [""] + - name: unused-parameter + severity: warning + disabled: false + exclude: [""] + arguments: + - allowRegex: "^_" + - name: unused-receiver + severity: warning + disabled: false + exclude: [""] + - name: useless-break + severity: warning + disabled: false + exclude: [""] + - name: var-naming + severity: warning + disabled: false + exclude: [""] + - name: waitgroup-by-value + severity: warning + disabled: false + exclude: [""] diff --git a/Makefile b/Makefile index d78ddcdd65e..205896c377a 100644 --- a/Makefile +++ b/Makefile @@ -184,9 +184,6 @@ static: install-tools pre-build @ gofmt -s -l -d $(PACKAGE_DIRECTORIES) 2>&1 | awk '{ 
print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." @ golangci-lint run --verbose $(PACKAGE_DIRECTORIES) --allow-parallel-runners - @ echo "revive ..." - @ revive -formatter friendly -config revive.toml $(PACKAGES) - @ for mod in $(SUBMODULES); do cd $$mod && $(MAKE) static && cd $(ROOT_PATH) > /dev/null; done # Because CI downloads the dashboard code and runs gofmt, we can't add this check into static now. diff --git a/client/Makefile b/client/Makefile index 3328bfe8d11..3e8f6b0d383 100644 --- a/client/Makefile +++ b/client/Makefile @@ -45,8 +45,6 @@ static: install-tools @ gofmt -s -l -d . 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." @ golangci-lint run -c ../.golangci.yml --verbose ./... --allow-parallel-runners - @ echo "revive ..." - @ revive -formatter friendly -config ../revive.toml ./... tidy: @ go mod tidy diff --git a/client/client.go b/client/client.go index b9535aa504e..6ec8e653839 100644 --- a/client/client.go +++ b/client/client.go @@ -431,12 +431,12 @@ func NewAPIContextV1() APIContext { } // GetAPIVersion returns the API version. -func (apiCtx *apiContextV1) GetAPIVersion() (version APIVersion) { +func (*apiContextV1) GetAPIVersion() (version APIVersion) { return V1 } // GetKeyspaceName returns the keyspace name. -func (apiCtx *apiContextV1) GetKeyspaceName() (keyspaceName string) { +func (*apiContextV1) GetKeyspaceName() (keyspaceName string) { return "" } @@ -453,7 +453,7 @@ func NewAPIContextV2(keyspaceName string) APIContext { } // GetAPIVersion returns the API version. -func (apiCtx *apiContextV2) GetAPIVersion() (version APIVersion) { +func (*apiContextV2) GetAPIVersion() (version APIVersion) { return V2 } @@ -912,7 +912,7 @@ func handleRegionResponse(res *pdpb.GetRegionResponse) *Region { return r } -func (c *client) GetRegionFromMember(ctx context.Context, key []byte, memberURLs []string, opts ...GetRegionOption) (*Region, error) { +func (c *client) GetRegionFromMember(ctx context.Context, key []byte, memberURLs []string, _ ...GetRegionOption) (*Region, error) { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { span = span.Tracer().StartSpan("pdclient.GetRegionFromMember", opentracing.ChildOf(span.Context())) defer span.Finish() diff --git a/client/keyspace_client.go b/client/keyspace_client.go index 340ecd0250e..e52a4f85f05 100644 --- a/client/keyspace_client.go +++ b/client/keyspace_client.go @@ -128,7 +128,7 @@ func (c *client) UpdateKeyspaceState(ctx context.Context, id uint32, state keysp // It returns a stream of slices of keyspace metadata. // The first message in stream contains all current keyspaceMeta, // all subsequent messages contains new put events for all keyspaces. 
-func (c *client) WatchKeyspaces(ctx context.Context) (chan []*keyspacepb.KeyspaceMeta, error) { +func (*client) WatchKeyspaces(context.Context) (chan []*keyspacepb.KeyspaceMeta, error) { return nil, errors.Errorf("WatchKeyspaces unimplemented") } diff --git a/client/mock_pd_service_discovery.go b/client/mock_pd_service_discovery.go index 17613a2f9e4..f1fabd0a1d2 100644 --- a/client/mock_pd_service_discovery.go +++ b/client/mock_pd_service_discovery.go @@ -56,19 +56,19 @@ func (m *mockPDServiceDiscovery) GetAllServiceClients() []ServiceClient { return m.clients } -func (m *mockPDServiceDiscovery) GetClusterID() uint64 { return 0 } -func (m *mockPDServiceDiscovery) GetKeyspaceID() uint32 { return 0 } -func (m *mockPDServiceDiscovery) GetKeyspaceGroupID() uint32 { return 0 } -func (m *mockPDServiceDiscovery) GetServiceURLs() []string { return nil } -func (m *mockPDServiceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { return nil } -func (m *mockPDServiceDiscovery) GetClientConns() *sync.Map { return nil } -func (m *mockPDServiceDiscovery) GetServingURL() string { return "" } -func (m *mockPDServiceDiscovery) GetBackupURLs() []string { return nil } -func (m *mockPDServiceDiscovery) GetServiceClient() ServiceClient { return nil } -func (m *mockPDServiceDiscovery) GetOrCreateGRPCConn(url string) (*grpc.ClientConn, error) { +func (*mockPDServiceDiscovery) GetClusterID() uint64 { return 0 } +func (*mockPDServiceDiscovery) GetKeyspaceID() uint32 { return 0 } +func (*mockPDServiceDiscovery) GetKeyspaceGroupID() uint32 { return 0 } +func (*mockPDServiceDiscovery) GetServiceURLs() []string { return nil } +func (*mockPDServiceDiscovery) GetServingEndpointClientConn() *grpc.ClientConn { return nil } +func (*mockPDServiceDiscovery) GetClientConns() *sync.Map { return nil } +func (*mockPDServiceDiscovery) GetServingURL() string { return "" } +func (*mockPDServiceDiscovery) GetBackupURLs() []string { return nil } +func (*mockPDServiceDiscovery) GetServiceClient() ServiceClient { return nil } +func (*mockPDServiceDiscovery) GetOrCreateGRPCConn(string) (*grpc.ClientConn, error) { return nil, nil } -func (m *mockPDServiceDiscovery) ScheduleCheckMemberChanged() {} -func (m *mockPDServiceDiscovery) CheckMemberChanged() error { return nil } -func (m *mockPDServiceDiscovery) AddServingURLSwitchedCallback(callbacks ...func()) {} -func (m *mockPDServiceDiscovery) AddServiceURLsSwitchedCallback(callbacks ...func()) {} +func (*mockPDServiceDiscovery) ScheduleCheckMemberChanged() {} +func (*mockPDServiceDiscovery) CheckMemberChanged() error { return nil } +func (*mockPDServiceDiscovery) AddServingURLSwitchedCallback(...func()) {} +func (*mockPDServiceDiscovery) AddServiceURLsSwitchedCallback(...func()) {} diff --git a/client/pd_service_discovery.go b/client/pd_service_discovery.go index defb797b7ca..97e82ec3321 100644 --- a/client/pd_service_discovery.go +++ b/client/pd_service_discovery.go @@ -247,9 +247,9 @@ func (c *pdServiceClient) NeedRetry(pdErr *pdpb.Error, err error) bool { return !(err == nil && pdErr == nil) } -type errFn func(pdErr *pdpb.Error) bool +type errFn func(*pdpb.Error) bool -func emptyErrorFn(pdErr *pdpb.Error) bool { +func emptyErrorFn(*pdpb.Error) bool { return false } @@ -618,7 +618,7 @@ func (c *pdServiceDiscovery) checkLeaderHealth(ctx context.Context) { } func (c *pdServiceDiscovery) checkFollowerHealth(ctx context.Context) { - c.followers.Range(func(key, value any) bool { + c.followers.Range(func(_, value any) bool { // To ensure that the leader's healthy check is not delayed, 
shorten the duration. ctx, cancel := context.WithTimeout(ctx, MemberHealthCheckInterval/3) defer cancel() @@ -661,7 +661,7 @@ func (c *pdServiceDiscovery) SetKeyspaceID(keyspaceID uint32) { } // GetKeyspaceGroupID returns the ID of the keyspace group -func (c *pdServiceDiscovery) GetKeyspaceGroupID() uint32 { +func (*pdServiceDiscovery) GetKeyspaceGroupID() uint32 { // PD/API service only supports the default keyspace group return defaultKeySpaceGroupID } diff --git a/client/resource_group/controller/controller.go b/client/resource_group/controller/controller.go index 750a3c6e48f..79bd6a9c3a6 100755 --- a/client/resource_group/controller/controller.go +++ b/client/resource_group/controller/controller.go @@ -398,8 +398,7 @@ func (c *ResourceGroupsController) Start(ctx context.Context) { } case gc := <-c.tokenBucketUpdateChan: - now := gc.run.now - go gc.handleTokenBucketUpdateEvent(c.loopCtx, now) + go gc.handleTokenBucketUpdateEvent(c.loopCtx) } } }() @@ -473,7 +472,7 @@ func (c *ResourceGroupsController) cleanUpResourceGroup() { } func (c *ResourceGroupsController) executeOnAllGroups(f func(controller *groupCostController)) { - c.groupsController.Range(func(name, value any) bool { + c.groupsController.Range(func(_, value any) bool { f(value.(*groupCostController)) return true }) @@ -504,7 +503,7 @@ func (c *ResourceGroupsController) handleTokenBucketResponse(resp []*rmpb.TokenB func (c *ResourceGroupsController) collectTokenBucketRequests(ctx context.Context, source string, typ selectType) { c.run.currentRequests = make([]*rmpb.TokenBucketRequest, 0) - c.groupsController.Range(func(name, value any) bool { + c.groupsController.Range(func(_, value any) bool { gc := value.(*groupCostController) request := gc.collectRequestAndConsumption(typ) if request != nil { @@ -856,7 +855,7 @@ func (gc *groupCostController) resetEmergencyTokenAcquisition() { } } -func (gc *groupCostController) handleTokenBucketUpdateEvent(ctx context.Context, now time.Time) { +func (gc *groupCostController) handleTokenBucketUpdateEvent(ctx context.Context) { switch gc.mode { case rmpb.GroupMode_RawMode: for _, counter := range gc.run.resourceTokens { @@ -873,7 +872,7 @@ func (gc *groupCostController) handleTokenBucketUpdateEvent(ctx context.Context, counter.notify.setupNotificationCh = nil threshold := counter.notify.setupNotificationThreshold counter.notify.mu.Unlock() - counter.limiter.SetupNotificationThreshold(now, threshold) + counter.limiter.SetupNotificationThreshold(threshold) case <-ctx.Done(): return } @@ -894,7 +893,7 @@ func (gc *groupCostController) handleTokenBucketUpdateEvent(ctx context.Context, counter.notify.setupNotificationCh = nil threshold := counter.notify.setupNotificationThreshold counter.notify.mu.Unlock() - counter.limiter.SetupNotificationThreshold(now, threshold) + counter.limiter.SetupNotificationThreshold(threshold) case <-ctx.Done(): return } diff --git a/client/resource_group/controller/limiter.go b/client/resource_group/controller/limiter.go index 7e76934643f..9b343350d75 100644 --- a/client/resource_group/controller/limiter.go +++ b/client/resource_group/controller/limiter.go @@ -218,7 +218,8 @@ func (lim *Limiter) Reserve(ctx context.Context, waitDuration time.Duration, now } // SetupNotificationThreshold enables the notification at the given threshold. -func (lim *Limiter) SetupNotificationThreshold(now time.Time, threshold float64) { +// FIXME: is it expected? 
+func (lim *Limiter) SetupNotificationThreshold(threshold float64) { lim.mu.Lock() defer lim.mu.Unlock() lim.notifyThreshold = threshold diff --git a/client/resource_group/controller/model.go b/client/resource_group/controller/model.go index dedc2ed7359..9e86de69abb 100644 --- a/client/resource_group/controller/model.go +++ b/client/resource_group/controller/model.go @@ -75,8 +75,7 @@ func newKVCalculator(cfg *RUConfig) *KVCalculator { } // Trickle ... -func (kc *KVCalculator) Trickle(*rmpb.Consumption) { -} +func (*KVCalculator) Trickle(*rmpb.Consumption) {} // BeforeKVRequest ... func (kc *KVCalculator) BeforeKVRequest(consumption *rmpb.Consumption, req RequestInfo) { @@ -166,11 +165,11 @@ func (dsc *SQLCalculator) Trickle(consumption *rmpb.Consumption) { } // BeforeKVRequest ... -func (dsc *SQLCalculator) BeforeKVRequest(consumption *rmpb.Consumption, req RequestInfo) { +func (*SQLCalculator) BeforeKVRequest(*rmpb.Consumption, RequestInfo) { } // AfterKVRequest ... -func (dsc *SQLCalculator) AfterKVRequest(consumption *rmpb.Consumption, req RequestInfo, res ResponseInfo) { +func (*SQLCalculator) AfterKVRequest(*rmpb.Consumption, RequestInfo, ResponseInfo) { } func getRUValueFromConsumption(custom *rmpb.Consumption, typ rmpb.RequestUnitType) float64 { diff --git a/client/resource_group/controller/testutil.go b/client/resource_group/controller/testutil.go index 4df8c9bba0d..01a9c3af1fc 100644 --- a/client/resource_group/controller/testutil.go +++ b/client/resource_group/controller/testutil.go @@ -52,7 +52,7 @@ func (tri *TestRequestInfo) StoreID() uint64 { } // ReplicaNumber implements the RequestInfo interface. -func (tri *TestRequestInfo) ReplicaNumber() int64 { +func (*TestRequestInfo) ReplicaNumber() int64 { return 1 } diff --git a/client/retry/backoff_test.go b/client/retry/backoff_test.go index c877860b5ae..5aa651a1b53 100644 --- a/client/retry/backoff_test.go +++ b/client/retry/backoff_test.go @@ -95,7 +95,7 @@ func TestBackoffer(t *testing.T) { // Test the retryable checker. execCount = 0 bo = InitialBackoffer(base, max, total) - bo.SetRetryableChecker(func(err error) bool { + bo.SetRetryableChecker(func(error) bool { return execCount < 2 }) err = bo.Exec(ctx, func() error { diff --git a/client/testutil/check_env_dummy.go b/client/testutil/check_env_dummy.go index 2fbcbd1a9e7..c8f4d268c9d 100644 --- a/client/testutil/check_env_dummy.go +++ b/client/testutil/check_env_dummy.go @@ -16,6 +16,6 @@ package testutil -func environmentCheck(addr string) bool { +func environmentCheck(_ string) bool { return true } diff --git a/client/tlsutil/tlsconfig.go b/client/tlsutil/tlsconfig.go index c9cee5987bb..a8bac17f676 100644 --- a/client/tlsutil/tlsconfig.go +++ b/client/tlsutil/tlsconfig.go @@ -131,7 +131,7 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { } if info.AllowedCN != "" { - cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + cfg.VerifyPeerCertificate = func(_ [][]byte, verifiedChains [][]*x509.Certificate) error { for _, chains := range verifiedChains { if len(chains) != 0 { if info.AllowedCN == chains[0].Subject.CommonName { @@ -145,10 +145,10 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { // this only reloads certs when there's a client request // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching - cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + cfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) { return NewCert(info.CertFile, info.KeyFile, info.parseFunc) } - cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { + cfg.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { return NewCert(info.CertFile, info.KeyFile, info.parseFunc) } return cfg, nil diff --git a/client/tso_batch_controller.go b/client/tso_batch_controller.go index 5f3b08c2895..d7ba5d7e74b 100644 --- a/client/tso_batch_controller.go +++ b/client/tso_batch_controller.go @@ -140,7 +140,7 @@ func (tbc *tsoBatchController) finishCollectedRequests(physical, firstLogical in for i := 0; i < tbc.collectedRequestCount; i++ { tsoReq := tbc.collectedRequests[i] tsoReq.physical, tsoReq.logical = physical, tsoutil.AddLogical(firstLogical, int64(i), suffixBits) - defer trace.StartRegion(tsoReq.requestCtx, "pdclient.tsoReqDequeue").End() + defer trace.StartRegion(tsoReq.requestCtx, "pdclient.tsoReqDequeue").End() // nolint tsoReq.tryDone(err) } // Prevent the finished requests from being processed again. diff --git a/client/tso_dispatcher.go b/client/tso_dispatcher.go index 88f8ffd61b5..ad3aa1c5d74 100644 --- a/client/tso_dispatcher.go +++ b/client/tso_dispatcher.go @@ -580,7 +580,7 @@ func (c *tsoClient) allowTSOFollowerProxy(dc string) bool { // chooseStream uses the reservoir sampling algorithm to randomly choose a connection. // connectionCtxs will only have only one stream to choose when the TSO Follower Proxy is off. -func (c *tsoClient) chooseStream(connectionCtxs *sync.Map) (connectionCtx *tsoConnectionContext) { +func (*tsoClient) chooseStream(connectionCtxs *sync.Map) (connectionCtx *tsoConnectionContext) { idx := 0 connectionCtxs.Range(func(_, cc any) bool { j := rand.Intn(idx + 1) @@ -797,6 +797,7 @@ func (c *tsoClient) processRequests( stream tsoStream, dcLocation string, tbc *tsoBatchController, ) error { requests := tbc.getCollectedRequests() + // nolint for _, req := range requests { defer trace.StartRegion(req.requestCtx, "pdclient.tsoReqSend").End() if span := opentracing.SpanFromContext(req.requestCtx); span != nil && span.Tracer() != nil { diff --git a/client/tso_service_discovery.go b/client/tso_service_discovery.go index f6c46346d5d..34ef16f88b0 100644 --- a/client/tso_service_discovery.go +++ b/client/tso_service_discovery.go @@ -349,13 +349,11 @@ func (c *tsoServiceDiscovery) CheckMemberChanged() error { // AddServingURLSwitchedCallback adds callbacks which will be called when the primary in // a primary/secondary configured cluster is switched. -func (c *tsoServiceDiscovery) AddServingURLSwitchedCallback(callbacks ...func()) { -} +func (*tsoServiceDiscovery) AddServingURLSwitchedCallback(...func()) {} // AddServiceURLsSwitchedCallback adds callbacks which will be called when any primary/secondary // in a primary/secondary configured cluster is changed. -func (c *tsoServiceDiscovery) AddServiceURLsSwitchedCallback(callbacks ...func()) { -} +func (*tsoServiceDiscovery) AddServiceURLsSwitchedCallback(...func()) {} // SetTSOLocalServURLsUpdatedCallback adds a callback which will be called when the local tso // allocator leader list is updated. 
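Note: the `// nolint` comments added to client/tso_batch_controller.go and client/tso_dispatcher.go above silence revive's `defer` rule, which this patch enables in .golangci.yml with the ["call-chain", "loop"] arguments and which, among other things, reports `defer` statements placed inside loops. A minimal sketch (hypothetical function and variable names, not code from this repository) of the pattern the rule flags:

package main

import "fmt"

// process defers work inside a loop: every deferred call is queued until
// process returns, rather than running at the end of each iteration.
// revive's defer rule (with the "loop" argument) reports this pattern.
func process(items []string) {
	for _, item := range items {
		defer fmt.Println("done:", item) // flagged: defer inside a loop
	}
}

func main() {
	// Prints the "done" lines only after the loop finishes, in LIFO order.
	process([]string{"a", "b", "c"})
}

In the TSO request path the per-request `defer trace.StartRegion(...).End()` calls inside the batch loop appear to be intentional, so the patch keeps them and marks the lines with `// nolint` instead of restructuring the code.
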
diff --git a/client/tso_stream.go b/client/tso_stream.go index 83c0f08d4e0..14b72bc697b 100644 --- a/client/tso_stream.go +++ b/client/tso_stream.go @@ -34,13 +34,13 @@ type tsoStreamBuilderFactory interface { type pdTSOStreamBuilderFactory struct{} -func (f *pdTSOStreamBuilderFactory) makeBuilder(cc *grpc.ClientConn) tsoStreamBuilder { +func (*pdTSOStreamBuilderFactory) makeBuilder(cc *grpc.ClientConn) tsoStreamBuilder { return &pdTSOStreamBuilder{client: pdpb.NewPDClient(cc), serverURL: cc.Target()} } type tsoTSOStreamBuilderFactory struct{} -func (f *tsoTSOStreamBuilderFactory) makeBuilder(cc *grpc.ClientConn) tsoStreamBuilder { +func (*tsoTSOStreamBuilderFactory) makeBuilder(cc *grpc.ClientConn) tsoStreamBuilder { return &tsoTSOStreamBuilder{client: tsopb.NewTSOClient(cc), serverURL: cc.Target()} } diff --git a/go.mod b/go.mod index 2620f5ad0a7..c76242f3753 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,6 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/joho/godotenv v1.4.0 github.com/mailru/easyjson v0.7.6 - github.com/mgechev/revive v1.0.2 github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d github.com/pingcap/errcode v0.3.0 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c @@ -95,7 +94,6 @@ require ( github.com/dnephin/pflag v1.0.7 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/fatih/color v1.10.0 // indirect - github.com/fatih/structtag v1.2.0 // indirect github.com/fogleman/gg v1.3.0 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect @@ -138,15 +136,11 @@ require ( github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a // indirect github.com/mattn/go-colorable v0.1.8 // indirect github.com/mattn/go-isatty v0.0.19 // indirect - github.com/mattn/go-runewidth v0.0.8 // indirect github.com/mattn/go-sqlite3 v1.14.15 // indirect - github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 // indirect github.com/minio/sio v0.3.0 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/oleiade/reflections v1.0.1 // indirect - github.com/olekukonko/tablewriter v0.0.4 // indirect github.com/onsi/gomega v1.20.1 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 // indirect diff --git a/go.sum b/go.sum index d99804c887c..d11fad07aa6 100644 --- a/go.sum +++ b/go.sum @@ -111,11 +111,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod 
h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -141,7 +138,6 @@ github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -316,31 +312,19 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0= -github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg= -github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo= github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus= github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -355,8 +339,6 @@ github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= -github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -644,7 +626,6 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -712,7 +693,6 @@ golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191114200427-caa0b0f7d508/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -807,7 +787,6 @@ gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= moul.io/zapgorm2 v1.1.0 h1:qwAlMBYf+qJkJ7PAzJl4oCe6eS6QGiKAXUPeis0+RBE= moul.io/zapgorm2 v1.1.0/go.mod h1:emRfKjNqSzVj5lcgasBdovIXY1jSOwFz2GQZn1Rddks= rsc.io/pdf v0.1.1/go.mod 
h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/pkg/audit/audit.go b/pkg/audit/audit.go index b971b09ed7e..f84d035f8c9 100644 --- a/pkg/audit/audit.go +++ b/pkg/audit/audit.go @@ -118,7 +118,7 @@ func NewLocalLogBackend(before bool) Backend { } // ProcessHTTPRequest is used to implement audit.Backend -func (l *LocalLogBackend) ProcessHTTPRequest(r *http.Request) bool { +func (*LocalLogBackend) ProcessHTTPRequest(r *http.Request) bool { requestInfo, ok := requestutil.RequestInfoFrom(r.Context()) if !ok { return false diff --git a/pkg/autoscaling/calculation.go b/pkg/autoscaling/calculation.go index d85af498e47..8c8783dd618 100644 --- a/pkg/autoscaling/calculation.go +++ b/pkg/autoscaling/calculation.go @@ -409,7 +409,7 @@ func buildPlans(planMap map[string]map[string]struct{}, resourceTypeMap map[stri } // TODO: implement heterogeneous logic and take cluster information into consideration. -func findBestGroupToScaleIn(strategy *Strategy, scaleInQuota float64, groups []*Plan) Plan { +func findBestGroupToScaleIn(_ *Strategy, _ float64, groups []*Plan) Plan { return *groups[0] } diff --git a/pkg/autoscaling/calculation_test.go b/pkg/autoscaling/calculation_test.go index 5d0c2ba1126..9eb4ad648df 100644 --- a/pkg/autoscaling/calculation_test.go +++ b/pkg/autoscaling/calculation_test.go @@ -203,7 +203,7 @@ func TestGetScaledTiKVGroups(t *testing.T) { type mockQuerier struct{} -func (q *mockQuerier) Query(options *QueryOptions) (QueryResult, error) { +func (*mockQuerier) Query(options *QueryOptions) (QueryResult, error) { result := make(QueryResult) for _, addr := range options.addresses { result[addr] = mockResultValue diff --git a/pkg/autoscaling/prometheus_test.go b/pkg/autoscaling/prometheus_test.go index 3ee6cb94e37..9fe69e810d1 100644 --- a/pkg/autoscaling/prometheus_test.go +++ b/pkg/autoscaling/prometheus_test.go @@ -168,7 +168,7 @@ func makeJSONResponse(promResp *response) (*http.Response, []byte, error) { return response, body, nil } -func (c *normalClient) URL(ep string, args map[string]string) *url.URL { +func (*normalClient) URL(ep string, args map[string]string) *url.URL { return doURL(ep, args) } @@ -206,11 +206,11 @@ func TestRetrieveCPUMetrics(t *testing.T) { type emptyResponseClient struct{} -func (c *emptyResponseClient) URL(ep string, args map[string]string) *url.URL { +func (*emptyResponseClient) URL(ep string, args map[string]string) *url.URL { return doURL(ep, args) } -func (c *emptyResponseClient) Do(_ context.Context, req *http.Request) (r *http.Response, body []byte, err error) { +func (*emptyResponseClient) Do(context.Context, *http.Request) (r *http.Response, body []byte, err error) { promResp := &response{ Status: "success", Data: data{ @@ -235,11 +235,11 @@ func TestEmptyResponse(t *testing.T) { type errorHTTPStatusClient struct{} -func (c *errorHTTPStatusClient) URL(ep string, args map[string]string) *url.URL { +func (*errorHTTPStatusClient) URL(ep string, args map[string]string) *url.URL { return doURL(ep, args) } -func (c *errorHTTPStatusClient) Do(_ context.Context, req *http.Request) (r *http.Response, body []byte, err error) { +func (*errorHTTPStatusClient) Do(context.Context, *http.Request) (r *http.Response, body []byte, err error) { promResp := &response{} r, body, err = makeJSONResponse(promResp) @@ -262,11 +262,11 @@ func TestErrorHTTPStatus(t *testing.T) { type errorPrometheusStatusClient struct{} -func (c *errorPrometheusStatusClient) URL(ep string, args map[string]string) *url.URL { +func (*errorPrometheusStatusClient) URL(ep string, args 
map[string]string) *url.URL { return doURL(ep, args) } -func (c *errorPrometheusStatusClient) Do(_ context.Context, req *http.Request) (r *http.Response, body []byte, err error) { +func (*errorPrometheusStatusClient) Do(_ context.Context, _ *http.Request) (r *http.Response, body []byte, err error) { promResp := &response{ Status: "error", } diff --git a/pkg/btree/btree_generic.go b/pkg/btree/btree_generic.go index f918a8ac686..599614678eb 100644 --- a/pkg/btree/btree_generic.go +++ b/pkg/btree/btree_generic.go @@ -73,7 +73,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//revive:disable +// nolint package btree import ( diff --git a/pkg/btree/btree_generic_test.go b/pkg/btree/btree_generic_test.go index 9aa118fb8ad..fd0df3e5aaf 100644 --- a/pkg/btree/btree_generic_test.go +++ b/pkg/btree/btree_generic_test.go @@ -475,7 +475,7 @@ func BenchmarkSeek(b *testing.B) { b.StartTimer() for i := 0; i < b.N; i++ { - tr.AscendGreaterOrEqual(Int(i%size), func(i Int) bool { return false }) + tr.AscendGreaterOrEqual(Int(i%size), func(_ Int) bool { return false }) } } diff --git a/pkg/core/metrics.go b/pkg/core/metrics.go index e6f3535b1d7..d23cf9dfcaa 100644 --- a/pkg/core/metrics.go +++ b/pkg/core/metrics.go @@ -123,19 +123,19 @@ func NewNoopHeartbeatProcessTracer() RegionHeartbeatProcessTracer { return &noopHeartbeatProcessTracer{} } -func (n *noopHeartbeatProcessTracer) Begin() {} -func (n *noopHeartbeatProcessTracer) OnPreCheckFinished() {} -func (n *noopHeartbeatProcessTracer) OnAsyncHotStatsFinished() {} -func (n *noopHeartbeatProcessTracer) OnRegionGuideFinished() {} -func (n *noopHeartbeatProcessTracer) OnSaveCacheBegin() {} -func (n *noopHeartbeatProcessTracer) OnSaveCacheFinished() {} -func (n *noopHeartbeatProcessTracer) OnCheckOverlapsFinished() {} -func (n *noopHeartbeatProcessTracer) OnValidateRegionFinished() {} -func (n *noopHeartbeatProcessTracer) OnSetRegionFinished() {} -func (n *noopHeartbeatProcessTracer) OnUpdateSubTreeFinished() {} -func (n *noopHeartbeatProcessTracer) OnCollectRegionStatsFinished() {} -func (n *noopHeartbeatProcessTracer) OnAllStageFinished() {} -func (n *noopHeartbeatProcessTracer) LogFields() []zap.Field { +func (*noopHeartbeatProcessTracer) Begin() {} +func (*noopHeartbeatProcessTracer) OnPreCheckFinished() {} +func (*noopHeartbeatProcessTracer) OnAsyncHotStatsFinished() {} +func (*noopHeartbeatProcessTracer) OnRegionGuideFinished() {} +func (*noopHeartbeatProcessTracer) OnSaveCacheBegin() {} +func (*noopHeartbeatProcessTracer) OnSaveCacheFinished() {} +func (*noopHeartbeatProcessTracer) OnCheckOverlapsFinished() {} +func (*noopHeartbeatProcessTracer) OnValidateRegionFinished() {} +func (*noopHeartbeatProcessTracer) OnSetRegionFinished() {} +func (*noopHeartbeatProcessTracer) OnUpdateSubTreeFinished() {} +func (*noopHeartbeatProcessTracer) OnCollectRegionStatsFinished() {} +func (*noopHeartbeatProcessTracer) OnAllStageFinished() {} +func (*noopHeartbeatProcessTracer) LogFields() []zap.Field { return nil } diff --git a/pkg/core/region.go b/pkg/core/region.go index f7a4ef5f0fd..baabafa1fa9 100644 --- a/pkg/core/region.go +++ b/pkg/core/region.go @@ -716,7 +716,7 @@ type RegionGuideFunc func(region, origin *RegionInfo) (saveKV, saveCache, needSy // GenerateRegionGuideFunc is used to generate a RegionGuideFunc. Control the log output by specifying the log function. // nil means do not print the log. 
func GenerateRegionGuideFunc(enableLog bool) RegionGuideFunc { - noLog := func(msg string, fields ...zap.Field) {} + noLog := func(string, ...zap.Field) {} debug, info := noLog, noLog if enableLog { debug = log.Debug @@ -964,7 +964,7 @@ func (r *RegionsInfo) AtomicCheckAndPutRegion(region *RegionInfo, trace RegionHe } // GetRelevantRegions returns the relevant regions for a given region. -func (r *RegionsInfo) GetRelevantRegions(region *RegionInfo, trace RegionHeartbeatProcessTracer) (origin *RegionInfo, overlaps []*regionItem) { +func (r *RegionsInfo) GetRelevantRegions(region *RegionInfo, _ RegionHeartbeatProcessTracer) (origin *RegionInfo, overlaps []*regionItem) { r.t.RLock() defer r.t.RUnlock() origin = r.getRegionLocked(region.GetID()) diff --git a/pkg/core/store_test.go b/pkg/core/store_test.go index 67618a63ea9..5cb324e5635 100644 --- a/pkg/core/store_test.go +++ b/pkg/core/store_test.go @@ -62,7 +62,7 @@ func TestDistinctScore(t *testing.T) { re.Equal(float64(0), DistinctScore(labels, stores, store)) } -func TestCloneStore(t *testing.T) { +func TestCloneStore(_ *testing.T) { meta := &metapb.Store{Id: 1, Address: "mock://tikv-1", Labels: []*metapb.StoreLabel{{Key: "zone", Value: "z1"}, {Key: "host", Value: "h1"}}} store := NewStoreInfo(meta) start := time.Now() diff --git a/pkg/core/storelimit/sliding_window.go b/pkg/core/storelimit/sliding_window.go index 0a70eb548d0..8feb0a2094d 100644 --- a/pkg/core/storelimit/sliding_window.go +++ b/pkg/core/storelimit/sliding_window.go @@ -50,7 +50,7 @@ func NewSlidingWindows() *SlidingWindows { } // Version returns v2 -func (s *SlidingWindows) Version() string { +func (*SlidingWindows) Version() string { return VersionV2 } @@ -75,8 +75,7 @@ func (s *SlidingWindows) Feedback(e float64) { } // Reset does nothing because the capacity depends on the feedback. -func (s *SlidingWindows) Reset(_ float64, _ Type) { -} +func (*SlidingWindows) Reset(_ float64, _ Type) {} func (s *SlidingWindows) set(cap float64, typ Type) { if typ != SendSnapshot { diff --git a/pkg/core/storelimit/store_limit.go b/pkg/core/storelimit/store_limit.go index dc1de88e09f..8d70b2918a1 100644 --- a/pkg/core/storelimit/store_limit.go +++ b/pkg/core/storelimit/store_limit.go @@ -82,15 +82,15 @@ func NewStoreRateLimit(ratePerSec float64) StoreLimit { } // Ack does nothing. -func (l *StoreRateLimit) Ack(_ int64, _ Type) {} +func (*StoreRateLimit) Ack(_ int64, _ Type) {} // Version returns v1 -func (l *StoreRateLimit) Version() string { +func (*StoreRateLimit) Version() string { return VersionV1 } // Feedback does nothing. -func (l *StoreRateLimit) Feedback(_ float64) {} +func (*StoreRateLimit) Feedback(_ float64) {} // Available returns the number of available tokens. // notice that the priority level is not used. 
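Note: most of the receiver and parameter renames in the hunks above (for example `func (*StoreRateLimit) Ack(_ int64, _ Type)` and `func (*noopHeartbeatProcessTracer) Begin()`) satisfy revive's `unused-receiver` and `unused-parameter` rules from the new .golangci.yml section; `unused-parameter` is configured with `allowRegex: "^_"`, so blank-named parameters are accepted. A minimal compilable sketch (hypothetical type and function names) of the before/after shape:

package main

import "fmt"

type noopTracer struct{}

// Before: func (t *noopTracer) Begin(name string) {} — revive reports both
// the receiver t and the parameter name because the body never uses them.
// After: drop the names entirely; unnamed receivers and parameters pass.
func (*noopTracer) Begin(string) {}

// Alternatively, keep a blank identifier; the configured allowRegex "^_"
// accepts parameters whose names start with an underscore, as in the
// client/testutil/check_env_dummy.go hunk above.
func environmentCheck(_ string) bool { return true }

func main() {
	var tr noopTracer
	tr.Begin("heartbeat")
	fmt.Println(environmentCheck("dummy"))
}
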
diff --git a/pkg/dashboard/adapter/redirector_test.go b/pkg/dashboard/adapter/redirector_test.go index fff052f1d50..7767a6fda34 100644 --- a/pkg/dashboard/adapter/redirector_test.go +++ b/pkg/dashboard/adapter/redirector_test.go @@ -42,14 +42,14 @@ func TestRedirectorTestSuite(t *testing.T) { func (suite *redirectorTestSuite) SetupSuite() { suite.tempText = "temp1" - suite.tempServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + suite.tempServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, _ = io.WriteString(w, suite.tempText) })) suite.testName = "test1" suite.redirector = NewRedirector(suite.testName, nil) suite.noRedirectHTTPClient = &http.Client{ - CheckRedirect: func(req *http.Request, via []*http.Request) error { + CheckRedirect: func(*http.Request, []*http.Request) error { // ErrUseLastResponse can be returned by Client.CheckRedirect hooks to // control how redirects are processed. If returned, the next request // is not sent and the most recent response is returned with its body diff --git a/pkg/dashboard/dashboard.go b/pkg/dashboard/dashboard.go index 9cd61a6f332..998127d0f1b 100644 --- a/pkg/dashboard/dashboard.go +++ b/pkg/dashboard/dashboard.go @@ -69,7 +69,7 @@ func GetServiceBuilders() []server.HandlerBuilder { // The order of execution must be sequential. return []server.HandlerBuilder{ // Dashboard API Service - func(ctx context.Context, srv *server.Server) (http.Handler, apiutil.APIServiceGroup, error) { + func(_ context.Context, srv *server.Server) (http.Handler, apiutil.APIServiceGroup, error) { distroutil.MustLoadAndReplaceStrings() if cfg, err = adapter.GenDashboardConfig(srv); err != nil { diff --git a/pkg/election/leadership_test.go b/pkg/election/leadership_test.go index de2e4b1129b..1fde4ddeba7 100644 --- a/pkg/election/leadership_test.go +++ b/pkg/election/leadership_test.go @@ -117,35 +117,35 @@ func TestExitWatch(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/election/fastTick", "return(true)")) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/utils/etcdutil/fastTick", "return(true)")) // Case1: close the client before the watch loop starts - checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() { + checkExitWatch(t, leaderKey, func(_ *embed.Etcd, client *clientv3.Client) func() { re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayWatcher", `pause`)) client.Close() re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayWatcher")) return func() {} }) // Case2: close the client when the watch loop is running - checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() { + checkExitWatch(t, leaderKey, func(_ *embed.Etcd, client *clientv3.Client) func() { // Wait for the watch loop to start time.Sleep(500 * time.Millisecond) client.Close() return func() {} }) // Case3: delete the leader key - checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() { + checkExitWatch(t, leaderKey, func(_ *embed.Etcd, client *clientv3.Client) func() { leaderKey := leaderKey _, err := client.Delete(context.Background(), leaderKey) re.NoError(err) return func() {} }) // Case4: close the server before the watch loop starts - checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() { + checkExitWatch(t, leaderKey, func(server *embed.Etcd, _ *clientv3.Client) func() { re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayWatcher", `pause`)) 
server.Close() re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayWatcher")) return func() {} }) // Case5: close the server when the watch loop is running - checkExitWatch(t, leaderKey, func(server *embed.Etcd, client *clientv3.Client) func() { + checkExitWatch(t, leaderKey, func(server *embed.Etcd, _ *clientv3.Client) func() { // Wait for the watch loop to start time.Sleep(500 * time.Millisecond) server.Close() diff --git a/pkg/encryption/key_manager_test.go b/pkg/encryption/key_manager_test.go index 74f8b9a3b47..26453eeb5b3 100644 --- a/pkg/encryption/key_manager_test.go +++ b/pkg/encryption/key_manager_test.go @@ -774,7 +774,7 @@ func TestSetLeadershipMasterKeyWithCiphertextKey(t *testing.T) { outputMasterKey, _ := hex.DecodeString(testMasterKey) outputCiphertextKey, _ := hex.DecodeString(testCiphertextKey) helper.newMasterKey = func( - meta *encryptionpb.MasterKey, + _ *encryptionpb.MasterKey, ciphertext []byte, ) (*MasterKey, error) { if newMasterKeyCalled < 2 { @@ -905,7 +905,7 @@ func TestKeyRotation(t *testing.T) { mockNow := int64(1601679533) helper.now = func() time.Time { return time.Unix(atomic.LoadInt64(&mockNow), 0) } mockTick := make(chan time.Time) - helper.tick = func(ticker *time.Ticker) <-chan time.Time { return mockTick } + helper.tick = func(_ *time.Ticker) <-chan time.Time { return mockTick } // Listen on watcher event reloadEvent := make(chan struct{}, 10) helper.eventAfterReloadByWatcher = func() { @@ -1001,7 +1001,7 @@ func TestKeyRotationConflict(t *testing.T) { mockNow := int64(1601679533) helper.now = func() time.Time { return time.Unix(atomic.LoadInt64(&mockNow), 0) } mockTick := make(chan time.Time, 10) - helper.tick = func(ticker *time.Ticker) <-chan time.Time { return mockTick } + helper.tick = func(_ *time.Ticker) <-chan time.Time { return mockTick } // Listen on ticker event tickerEvent := make(chan struct{}, 10) helper.eventAfterTicker = func() { diff --git a/pkg/encryption/kms.go b/pkg/encryption/kms.go index 7c52b4280c2..99dcf9619a3 100644 --- a/pkg/encryption/kms.go +++ b/pkg/encryption/kms.go @@ -60,7 +60,7 @@ func newMasterKeyFromKMS( roleArn := os.Getenv(envAwsRoleArn) tokenFile := os.Getenv(envAwsWebIdentityTokenFile) sessionName := os.Getenv(envAwsRoleSessionName) - optFn := func(options *kms.Options) {} + optFn := func(*kms.Options) {} // Session name is optional. if roleArn != "" && tokenFile != "" { client := sts.NewFromConfig(cfg) diff --git a/pkg/errs/errs_test.go b/pkg/errs/errs_test.go index 1dcabc32d9a..01b7de461b8 100644 --- a/pkg/errs/errs_test.go +++ b/pkg/errs/errs_test.go @@ -43,7 +43,7 @@ func (w *testingWriter) Write(p []byte) (n int, err error) { return n, nil } -func (w *testingWriter) Sync() error { +func (*testingWriter) Sync() error { return nil } @@ -124,7 +124,7 @@ func TestErrorEqual(t *testing.T) { re.False(errors.ErrorEqual(err1, err2)) } -func TestZapError(t *testing.T) { +func TestZapError(_ *testing.T) { err := errors.New("test") log.Info("test", ZapError(err)) err1 := ErrSchedulerNotFound diff --git a/pkg/mcs/metastorage/server/grpc_service.go b/pkg/mcs/metastorage/server/grpc_service.go index f5de50765e8..f018dc72f9f 100644 --- a/pkg/mcs/metastorage/server/grpc_service.go +++ b/pkg/mcs/metastorage/server/grpc_service.go @@ -39,13 +39,13 @@ var ( var _ meta_storagepb.MetaStorageServer = (*Service)(nil) // SetUpRestHandler is a hook to sets up the REST service. 
-var SetUpRestHandler = func(srv *Service) (http.Handler, apiutil.APIServiceGroup) { +var SetUpRestHandler = func(*Service) (http.Handler, apiutil.APIServiceGroup) { return dummyRestService{}, apiutil.APIServiceGroup{} } type dummyRestService struct{} -func (d dummyRestService) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (dummyRestService) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotImplemented) w.Write([]byte("not implemented")) } diff --git a/pkg/mcs/resourcemanager/server/config.go b/pkg/mcs/resourcemanager/server/config.go index 508f37fe069..3c7886daec4 100644 --- a/pkg/mcs/resourcemanager/server/config.go +++ b/pkg/mcs/resourcemanager/server/config.go @@ -108,7 +108,8 @@ type ControllerConfig struct { } // Adjust adjusts the configuration and initializes it with the default value if necessary. -func (rmc *ControllerConfig) Adjust(meta *configutil.ConfigMetaData) { +// FIXME: is it expected? +func (rmc *ControllerConfig) Adjust(_ *configutil.ConfigMetaData) { if rmc == nil { return } @@ -201,11 +202,11 @@ func (c *Config) Parse(flagSet *pflag.FlagSet) error { configutil.AdjustCommandLineString(flagSet, &c.ListenAddr, "listen-addr") configutil.AdjustCommandLineString(flagSet, &c.AdvertiseListenAddr, "advertise-listen-addr") - return c.Adjust(meta, false) + return c.Adjust(meta) } // Adjust is used to adjust the resource manager configurations. -func (c *Config) Adjust(meta *toml.MetaData, reloading bool) error { +func (c *Config) Adjust(meta *toml.MetaData) error { configMetaData := configutil.NewConfigMetadata(meta) if err := configMetaData.CheckUndecoded(); err != nil { c.WarningMsgs = append(c.WarningMsgs, err.Error()) diff --git a/pkg/mcs/resourcemanager/server/config_test.go b/pkg/mcs/resourcemanager/server/config_test.go index 64fd133ea73..2d57100468e 100644 --- a/pkg/mcs/resourcemanager/server/config_test.go +++ b/pkg/mcs/resourcemanager/server/config_test.go @@ -39,7 +39,7 @@ read-cpu-ms-cost = 5.0 cfg := NewConfig() meta, err := toml.Decode(cfgData, &cfg) re.NoError(err) - err = cfg.Adjust(&meta, false) + err = cfg.Adjust(&meta) re.NoError(err) re.Equal(time.Second*2, cfg.Controller.DegradedModeWaitDuration.Duration) diff --git a/pkg/mcs/resourcemanager/server/grpc_service.go b/pkg/mcs/resourcemanager/server/grpc_service.go index cf985a14764..2f35042c48f 100644 --- a/pkg/mcs/resourcemanager/server/grpc_service.go +++ b/pkg/mcs/resourcemanager/server/grpc_service.go @@ -41,13 +41,13 @@ var ( var _ rmpb.ResourceManagerServer = (*Service)(nil) // SetUpRestHandler is a hook to sets up the REST service. -var SetUpRestHandler = func(srv *Service) (http.Handler, apiutil.APIServiceGroup) { +var SetUpRestHandler = func(*Service) (http.Handler, apiutil.APIServiceGroup) { return dummyRestService{}, apiutil.APIServiceGroup{} } type dummyRestService struct{} -func (d dummyRestService) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (dummyRestService) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotImplemented) w.Write([]byte("not implemented")) } @@ -94,7 +94,7 @@ func (s *Service) checkServing() error { } // GetResourceGroup implements ResourceManagerServer.GetResourceGroup. 
-func (s *Service) GetResourceGroup(ctx context.Context, req *rmpb.GetResourceGroupRequest) (*rmpb.GetResourceGroupResponse, error) { +func (s *Service) GetResourceGroup(_ context.Context, req *rmpb.GetResourceGroupRequest) (*rmpb.GetResourceGroupResponse, error) { if err := s.checkServing(); err != nil { return nil, err } @@ -108,7 +108,7 @@ func (s *Service) GetResourceGroup(ctx context.Context, req *rmpb.GetResourceGro } // ListResourceGroups implements ResourceManagerServer.ListResourceGroups. -func (s *Service) ListResourceGroups(ctx context.Context, req *rmpb.ListResourceGroupsRequest) (*rmpb.ListResourceGroupsResponse, error) { +func (s *Service) ListResourceGroups(_ context.Context, req *rmpb.ListResourceGroupsRequest) (*rmpb.ListResourceGroupsResponse, error) { if err := s.checkServing(); err != nil { return nil, err } @@ -123,7 +123,7 @@ func (s *Service) ListResourceGroups(ctx context.Context, req *rmpb.ListResource } // AddResourceGroup implements ResourceManagerServer.AddResourceGroup. -func (s *Service) AddResourceGroup(ctx context.Context, req *rmpb.PutResourceGroupRequest) (*rmpb.PutResourceGroupResponse, error) { +func (s *Service) AddResourceGroup(_ context.Context, req *rmpb.PutResourceGroupRequest) (*rmpb.PutResourceGroupResponse, error) { if err := s.checkServing(); err != nil { return nil, err } @@ -135,7 +135,7 @@ func (s *Service) AddResourceGroup(ctx context.Context, req *rmpb.PutResourceGro } // DeleteResourceGroup implements ResourceManagerServer.DeleteResourceGroup. -func (s *Service) DeleteResourceGroup(ctx context.Context, req *rmpb.DeleteResourceGroupRequest) (*rmpb.DeleteResourceGroupResponse, error) { +func (s *Service) DeleteResourceGroup(_ context.Context, req *rmpb.DeleteResourceGroupRequest) (*rmpb.DeleteResourceGroupResponse, error) { if err := s.checkServing(); err != nil { return nil, err } @@ -147,7 +147,7 @@ func (s *Service) DeleteResourceGroup(ctx context.Context, req *rmpb.DeleteResou } // ModifyResourceGroup implements ResourceManagerServer.ModifyResourceGroup. 
-func (s *Service) ModifyResourceGroup(ctx context.Context, req *rmpb.PutResourceGroupRequest) (*rmpb.PutResourceGroupResponse, error) { +func (s *Service) ModifyResourceGroup(_ context.Context, req *rmpb.PutResourceGroupRequest) (*rmpb.PutResourceGroupResponse, error) { if err := s.checkServing(); err != nil { return nil, err } diff --git a/pkg/mcs/scheduling/server/apis/v1/api.go b/pkg/mcs/scheduling/server/apis/v1/api.go index 36451e5f031..be3277f3fc6 100644 --- a/pkg/mcs/scheduling/server/apis/v1/api.go +++ b/pkg/mcs/scheduling/server/apis/v1/api.go @@ -1292,7 +1292,7 @@ func scatterRegions(c *gin.Context) { if !ok { return 0, nil, errors.New("regions_id is invalid") } - return handler.ScatterRegionsByID(ids, group, retryLimit, false) + return handler.ScatterRegionsByID(ids, group, retryLimit) }() if err != nil { c.String(http.StatusInternalServerError, err.Error()) diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index 1b915b6874d..7ee7ae88cd1 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -473,7 +473,7 @@ func (c *Cluster) runMetricsCollectionJob() { select { case <-c.ctx.Done(): log.Info("metrics are reset") - c.resetMetrics() + resetMetrics() log.Info("metrics collection job has been stopped") return case <-ticker.C: @@ -487,7 +487,7 @@ func (c *Cluster) collectMetrics() { stores := c.GetStores() for _, s := range stores { statsMap.Observe(s) - statsMap.ObserveHotStat(s, c.hotStat.StoresStats) + statistics.ObserveHotStat(s, c.hotStat.StoresStats) } statsMap.Collect() @@ -504,7 +504,7 @@ func (c *Cluster) collectMetrics() { c.RegionsInfo.CollectWaitLockMetrics() } -func (c *Cluster) resetMetrics() { +func resetMetrics() { statistics.Reset() schedulers.ResetSchedulerMetrics() schedule.ResetHotSpotMetrics() diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go index 3e347afc12e..148a7015d11 100644 --- a/pkg/mcs/scheduling/server/config/config.go +++ b/pkg/mcs/scheduling/server/config/config.go @@ -294,7 +294,7 @@ func (o *PersistConfig) SetScheduleConfig(cfg *sc.ScheduleConfig) { } // AdjustScheduleCfg adjusts the schedule config during the initialization. -func (o *PersistConfig) AdjustScheduleCfg(scheduleCfg *sc.ScheduleConfig) { +func AdjustScheduleCfg(scheduleCfg *sc.ScheduleConfig) { // In case we add new default schedulers. for _, ps := range sc.DefaultSchedulers { if slice.NoneOf(scheduleCfg.Schedulers, func(i int) bool { @@ -374,7 +374,7 @@ func (o *PersistConfig) IsUseJointConsensus() bool { } // GetKeyType returns the key type. -func (o *PersistConfig) GetKeyType() constant.KeyType { +func (*PersistConfig) GetKeyType() constant.KeyType { return constant.StringToKeyType("table") } @@ -685,7 +685,7 @@ func (o *PersistConfig) SetSplitMergeInterval(splitMergeInterval time.Duration) } // SetHaltScheduling set HaltScheduling. -func (o *PersistConfig) SetHaltScheduling(halt bool, source string) { +func (o *PersistConfig) SetHaltScheduling(halt bool, _ string) { v := o.GetScheduleConfig().Clone() v.HaltScheduling = halt o.SetScheduleConfig(v) @@ -735,25 +735,25 @@ func (o *PersistConfig) IsRaftKV2() bool { // AddSchedulerCfg adds the scheduler configurations. // This method is a no-op since we only use configurations derived from one-way synchronization from API server now. 
-func (o *PersistConfig) AddSchedulerCfg(string, []string) {} +func (*PersistConfig) AddSchedulerCfg(string, []string) {} // RemoveSchedulerCfg removes the scheduler configurations. // This method is a no-op since we only use configurations derived from one-way synchronization from API server now. -func (o *PersistConfig) RemoveSchedulerCfg(tp string) {} +func (*PersistConfig) RemoveSchedulerCfg(string) {} // CheckLabelProperty checks if the label property is satisfied. -func (o *PersistConfig) CheckLabelProperty(typ string, labels []*metapb.StoreLabel) bool { +func (*PersistConfig) CheckLabelProperty(string, []*metapb.StoreLabel) bool { return false } // IsTraceRegionFlow returns if the region flow is tracing. // If the accuracy cannot reach 0.1 MB, it is considered not. -func (o *PersistConfig) IsTraceRegionFlow() bool { +func (*PersistConfig) IsTraceRegionFlow() bool { return false } // Persist saves the configuration to the storage. -func (o *PersistConfig) Persist(storage endpoint.ConfigStorage) error { +func (*PersistConfig) Persist(endpoint.ConfigStorage) error { return nil } diff --git a/pkg/mcs/scheduling/server/config/watcher.go b/pkg/mcs/scheduling/server/config/watcher.go index 8db5e656279..d1ca99bd36d 100644 --- a/pkg/mcs/scheduling/server/config/watcher.go +++ b/pkg/mcs/scheduling/server/config/watcher.go @@ -129,14 +129,14 @@ func (cw *Watcher) initializeConfigWatcher() error { return err } log.Info("update scheduling config", zap.Reflect("new", cfg)) - cw.AdjustScheduleCfg(&cfg.Schedule) + AdjustScheduleCfg(&cfg.Schedule) cw.SetClusterVersion(&cfg.ClusterVersion) cw.SetScheduleConfig(&cfg.Schedule) cw.SetReplicationConfig(&cfg.Replication) cw.SetStoreConfig(&cfg.Store) return nil } - deleteFn := func(kv *mvccpb.KeyValue) error { + deleteFn := func(*mvccpb.KeyValue) error { return nil } cw.configWatcher = etcdutil.NewLoopWatcher( diff --git a/pkg/mcs/scheduling/server/grpc_service.go b/pkg/mcs/scheduling/server/grpc_service.go index ebce73e3303..62ec1c1118f 100644 --- a/pkg/mcs/scheduling/server/grpc_service.go +++ b/pkg/mcs/scheduling/server/grpc_service.go @@ -45,13 +45,13 @@ var ( ) // SetUpRestHandler is a hook to sets up the REST service. -var SetUpRestHandler = func(srv *Service) (http.Handler, apiutil.APIServiceGroup) { +var SetUpRestHandler = func(*Service) (http.Handler, apiutil.APIServiceGroup) { return dummyRestService{}, apiutil.APIServiceGroup{} } type dummyRestService struct{} -func (d dummyRestService) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (dummyRestService) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotImplemented) w.Write([]byte("not implemented")) } @@ -169,7 +169,7 @@ func (s *Service) RegionHeartbeat(stream schedulingpb.Scheduling_RegionHeartbeat } // StoreHeartbeat implements gRPC SchedulingServer. -func (s *Service) StoreHeartbeat(ctx context.Context, request *schedulingpb.StoreHeartbeatRequest) (*schedulingpb.StoreHeartbeatResponse, error) { +func (s *Service) StoreHeartbeat(_ context.Context, request *schedulingpb.StoreHeartbeatRequest) (*schedulingpb.StoreHeartbeatResponse, error) { c := s.GetCluster() if c == nil { // TODO: add metrics @@ -203,7 +203,7 @@ func (s *Service) SplitRegions(ctx context.Context, request *schedulingpb.SplitR } // ScatterRegions implements gRPC SchedulingServer. 
-func (s *Service) ScatterRegions(ctx context.Context, request *schedulingpb.ScatterRegionsRequest) (*schedulingpb.ScatterRegionsResponse, error) { +func (s *Service) ScatterRegions(_ context.Context, request *schedulingpb.ScatterRegionsRequest) (*schedulingpb.ScatterRegionsResponse, error) { c := s.GetCluster() if c == nil { return &schedulingpb.ScatterRegionsResponse{Header: s.notBootstrappedHeader()}, nil @@ -235,7 +235,7 @@ func (s *Service) ScatterRegions(ctx context.Context, request *schedulingpb.Scat } // GetOperator gets information about the operator belonging to the specify region. -func (s *Service) GetOperator(ctx context.Context, request *schedulingpb.GetOperatorRequest) (*schedulingpb.GetOperatorResponse, error) { +func (s *Service) GetOperator(_ context.Context, request *schedulingpb.GetOperatorRequest) (*schedulingpb.GetOperatorResponse, error) { c := s.GetCluster() if c == nil { return &schedulingpb.GetOperatorResponse{Header: s.notBootstrappedHeader()}, nil @@ -262,7 +262,7 @@ func (s *Service) GetOperator(ctx context.Context, request *schedulingpb.GetOper } // AskBatchSplit implements gRPC SchedulingServer. -func (s *Service) AskBatchSplit(ctx context.Context, request *schedulingpb.AskBatchSplitRequest) (*schedulingpb.AskBatchSplitResponse, error) { +func (s *Service) AskBatchSplit(_ context.Context, request *schedulingpb.AskBatchSplitRequest) (*schedulingpb.AskBatchSplitResponse, error) { c := s.GetCluster() if c == nil { return &schedulingpb.AskBatchSplitResponse{Header: s.notBootstrappedHeader()}, nil diff --git a/pkg/mcs/scheduling/server/rule/watcher.go b/pkg/mcs/scheduling/server/rule/watcher.go index d8a8dd3e609..ea90b9d4e49 100644 --- a/pkg/mcs/scheduling/server/rule/watcher.go +++ b/pkg/mcs/scheduling/server/rule/watcher.go @@ -109,7 +109,7 @@ func NewWatcher( func (rw *Watcher) initializeRuleWatcher() error { var suspectKeyRanges *core.KeyRanges - preEventsFn := func(events []*clientv3.Event) error { + preEventsFn := func([]*clientv3.Event) error { // It will be locked until the postEventsFn is finished. rw.ruleManager.Lock() rw.patch = rw.ruleManager.BeginPatch() @@ -149,10 +149,9 @@ func (rw *Watcher) initializeRuleWatcher() error { suspectKeyRanges.Append(rule.StartKey, rule.EndKey) } return nil - } else { - log.Warn("unknown key when updating placement rule", zap.String("key", key)) - return nil } + log.Warn("unknown key when updating placement rule", zap.String("key", key)) + return nil } deleteFn := func(kv *mvccpb.KeyValue) error { key := string(kv.Key) @@ -181,12 +180,11 @@ func (rw *Watcher) initializeRuleWatcher() error { suspectKeyRanges.Append(rule.StartKey, rule.EndKey) } return nil - } else { - log.Warn("unknown key when deleting placement rule", zap.String("key", key)) - return nil } + log.Warn("unknown key when deleting placement rule", zap.String("key", key)) + return nil } - postEventsFn := func(events []*clientv3.Event) error { + postEventsFn := func([]*clientv3.Event) error { defer rw.ruleManager.Unlock() if err := rw.ruleManager.TryCommitPatchLocked(rw.patch); err != nil { log.Error("failed to commit patch", zap.Error(err)) @@ -213,7 +211,7 @@ func (rw *Watcher) initializeRuleWatcher() error { func (rw *Watcher) initializeRegionLabelWatcher() error { prefixToTrim := rw.regionLabelPathPrefix + "/" // TODO: use txn in region labeler. - preEventsFn := func(events []*clientv3.Event) error { + preEventsFn := func([]*clientv3.Event) error { // It will be locked until the postEventsFn is finished. 
rw.regionLabeler.Lock() return nil @@ -231,7 +229,7 @@ func (rw *Watcher) initializeRegionLabelWatcher() error { log.Info("delete region label rule", zap.String("key", key)) return rw.regionLabeler.DeleteLabelRuleLocked(strings.TrimPrefix(key, prefixToTrim)) } - postEventsFn := func(events []*clientv3.Event) error { + postEventsFn := func([]*clientv3.Event) error { defer rw.regionLabeler.Unlock() rw.regionLabeler.BuildRangeListLocked() return nil diff --git a/pkg/mcs/server/server.go b/pkg/mcs/server/server.go index 2c008e8f5e8..6aec799278c 100644 --- a/pkg/mcs/server/server.go +++ b/pkg/mcs/server/server.go @@ -171,7 +171,7 @@ func (bs *BaseServer) StartTimestamp() int64 { // CloseClientConns closes all client connections. func (bs *BaseServer) CloseClientConns() { - bs.clientConns.Range(func(key, value any) bool { + bs.clientConns.Range(func(_, value any) bool { conn := value.(*grpc.ClientConn) if err := conn.Close(); err != nil { log.Error("close client connection meet error") diff --git a/pkg/mcs/tso/server/config.go b/pkg/mcs/tso/server/config.go index eedf3a2f1b1..c117dd72e38 100644 --- a/pkg/mcs/tso/server/config.go +++ b/pkg/mcs/tso/server/config.go @@ -177,11 +177,11 @@ func (c *Config) Parse(flagSet *pflag.FlagSet) error { configutil.AdjustCommandLineString(flagSet, &c.ListenAddr, "listen-addr") configutil.AdjustCommandLineString(flagSet, &c.AdvertiseListenAddr, "advertise-listen-addr") - return c.Adjust(meta, false) + return c.Adjust(meta) } // Adjust is used to adjust the TSO configurations. -func (c *Config) Adjust(meta *toml.MetaData, reloading bool) error { +func (c *Config) Adjust(meta *toml.MetaData) error { configMetaData := configutil.NewConfigMetadata(meta) if err := configMetaData.CheckUndecoded(); err != nil { c.WarningMsgs = append(c.WarningMsgs, err.Error()) diff --git a/pkg/mcs/tso/server/config_test.go b/pkg/mcs/tso/server/config_test.go index 9f5bc298964..2cb9c8e019a 100644 --- a/pkg/mcs/tso/server/config_test.go +++ b/pkg/mcs/tso/server/config_test.go @@ -83,7 +83,7 @@ max-gap-reset-ts = "1h" cfg := NewConfig() meta, err := toml.Decode(cfgData, &cfg) re.NoError(err) - err = cfg.Adjust(&meta, false) + err = cfg.Adjust(&meta) re.NoError(err) re.Equal("tso-test-name", cfg.GetName()) diff --git a/pkg/mcs/tso/server/grpc_service.go b/pkg/mcs/tso/server/grpc_service.go index 31a74f2a688..03250d9ed37 100644 --- a/pkg/mcs/tso/server/grpc_service.go +++ b/pkg/mcs/tso/server/grpc_service.go @@ -42,13 +42,13 @@ var ( var _ tsopb.TSOServer = (*Service)(nil) // SetUpRestHandler is a hook to sets up the REST service. -var SetUpRestHandler = func(srv *Service) (http.Handler, apiutil.APIServiceGroup) { +var SetUpRestHandler = func(*Service) (http.Handler, apiutil.APIServiceGroup) { return dummyRestService{}, apiutil.APIServiceGroup{} } type dummyRestService struct{} -func (d dummyRestService) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (dummyRestService) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotImplemented) w.Write([]byte("not implemented")) } @@ -135,7 +135,7 @@ func (s *Service) Tso(stream tsopb.TSO_TsoServer) error { // FindGroupByKeyspaceID returns the keyspace group that the keyspace belongs to. 
func (s *Service) FindGroupByKeyspaceID( - ctx context.Context, request *tsopb.FindGroupByKeyspaceIDRequest, + _ context.Context, request *tsopb.FindGroupByKeyspaceIDRequest, ) (*tsopb.FindGroupByKeyspaceIDResponse, error) { respKeyspaceGroup := request.GetHeader().GetKeyspaceGroupId() if errorType, err := s.validRequest(request.GetHeader()); err != nil { @@ -189,7 +189,7 @@ func (s *Service) FindGroupByKeyspaceID( // GetMinTS gets the minimum timestamp across all keyspace groups served by the TSO server // who receives and handles the request. func (s *Service) GetMinTS( - ctx context.Context, request *tsopb.GetMinTSRequest, + _ context.Context, request *tsopb.GetMinTSRequest, ) (*tsopb.GetMinTSResponse, error) { respKeyspaceGroup := request.GetHeader().GetKeyspaceGroupId() if errorType, err := s.validRequest(request.GetHeader()); err != nil { diff --git a/pkg/mcs/tso/server/server.go b/pkg/mcs/tso/server/server.go index f5f46a29504..c38c7142730 100644 --- a/pkg/mcs/tso/server/server.go +++ b/pkg/mcs/tso/server/server.go @@ -250,7 +250,7 @@ func (s *Server) ResignPrimary(keyspaceID, keyspaceGroupID uint32) error { // AddServiceReadyCallback implements basicserver. // It adds callbacks when it's ready for providing tso service. -func (s *Server) AddServiceReadyCallback(callbacks ...func(context.Context) error) { +func (*Server) AddServiceReadyCallback(...func(context.Context) error) { // Do nothing here. The primary of each keyspace group assigned to this host // will respond to the requests accordingly. } @@ -278,7 +278,7 @@ func (s *Server) GetTSOAllocatorManager(keyspaceGroupID uint32) (*tso.AllocatorM } // IsLocalRequest checks if the forwarded host is the current host -func (s *Server) IsLocalRequest(forwardedHost string) bool { +func (*Server) IsLocalRequest(forwardedHost string) bool { // TODO: Check if the forwarded host is the current host. // The logic is depending on etcd service mode -- if the TSO service // uses the embedded etcd, check against ClientUrls; otherwise check @@ -310,13 +310,13 @@ func (s *Server) ValidateRequest(header *tsopb.RequestHeader) error { // GetExternalTS returns external timestamp from the cache or the persistent storage. // TODO: Implement GetExternalTS -func (s *Server) GetExternalTS() uint64 { +func (*Server) GetExternalTS() uint64 { return 0 } // SetExternalTS saves external timestamp to cache and the persistent storage. // TODO: Implement SetExternalTS -func (s *Server) SetExternalTS(externalTS uint64) error { +func (*Server) SetExternalTS(uint64) error { return nil } diff --git a/pkg/member/participant.go b/pkg/member/participant.go index 0bf3bcc547e..8a0ffadd31e 100644 --- a/pkg/member/participant.go +++ b/pkg/member/participant.go @@ -200,7 +200,7 @@ func (m *Participant) KeepLeader(ctx context.Context) { // PreCheckLeader does some pre-check before checking whether or not it's the leader. // It returns true if it passes the pre-check, false otherwise. -func (m *Participant) PreCheckLeader() error { +func (*Participant) PreCheckLeader() error { // No specific thing to check. Returns no error. return nil } @@ -280,7 +280,7 @@ func (m *Participant) IsSameLeader(leader participant) bool { } // CheckPriority checks whether there is another participant has higher priority and resign it as the leader if so. 
-func (m *Participant) CheckPriority(ctx context.Context) { +func (*Participant) CheckPriority(_ context.Context) { // TODO: implement weighted-election when it's in need } diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go index 6cf7ae143df..e5b3e39a502 100644 --- a/pkg/mock/mockcluster/mockcluster.go +++ b/pkg/mock/mockcluster/mockcluster.go @@ -123,7 +123,7 @@ func (mc *Cluster) AllocID() (uint64, error) { } // UpdateRegionsLabelLevelStats updates the label level stats for the regions. -func (mc *Cluster) UpdateRegionsLabelLevelStats(regions []*core.RegionInfo) {} +func (*Cluster) UpdateRegionsLabelLevelStats(_ []*core.RegionInfo) {} // LoadRegion puts region info without leader func (mc *Cluster) LoadRegion(regionID uint64, peerStoreIDs ...uint64) { diff --git a/pkg/mock/mockhbstream/mockhbstream.go b/pkg/mock/mockhbstream/mockhbstream.go index 289f31d63dd..ac8f246f86a 100644 --- a/pkg/mock/mockhbstream/mockhbstream.go +++ b/pkg/mock/mockhbstream/mockhbstream.go @@ -46,10 +46,10 @@ func (s HeartbeatStream) Send(m core.RegionHeartbeatResponse) error { } // SendMsg is used to send the message. -func (s HeartbeatStream) SendMsg(region *core.RegionInfo, msg *pdpb.RegionHeartbeatResponse) {} +func (HeartbeatStream) SendMsg(*core.RegionInfo, *pdpb.RegionHeartbeatResponse) {} // BindStream mock method. -func (s HeartbeatStream) BindStream(storeID uint64, stream hbstream.HeartbeatStream) {} +func (HeartbeatStream) BindStream(uint64, hbstream.HeartbeatStream) {} // Recv mocks method. func (s HeartbeatStream) Recv() core.RegionHeartbeatResponse { diff --git a/pkg/mock/mockid/mockid.go b/pkg/mock/mockid/mockid.go index 4c0e7540653..7b4902a6a04 100644 --- a/pkg/mock/mockid/mockid.go +++ b/pkg/mock/mockid/mockid.go @@ -38,6 +38,6 @@ func (alloc *IDAllocator) SetBase(newBase uint64) error { } // Rebase implements the IDAllocator interface. 
-func (alloc *IDAllocator) Rebase() error { +func (*IDAllocator) Rebase() error { return nil } diff --git a/pkg/ratelimit/controller_test.go b/pkg/ratelimit/controller_test.go index 50be24540e4..d4093555ba7 100644 --- a/pkg/ratelimit/controller_test.go +++ b/pkg/ratelimit/controller_test.go @@ -108,7 +108,7 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) { status := limiter.Update(label, o) re.NotZero(status & ConcurrencyNoChange) }, - checkStatusFunc: func(label string) {}, + checkStatusFunc: func(_ string) {}, }, { opt: UpdateConcurrencyLimiter(5), @@ -240,7 +240,7 @@ func TestControllerWithQPSLimiter(t *testing.T) { status := limiter.Update(label, o) re.NotZero(status & QPSNoChange) }, - checkStatusFunc: func(label string) {}, + checkStatusFunc: func(_ string) {}, }, { opt: UpdateQPSLimiter(5, 5), diff --git a/pkg/replication/replication_mode_test.go b/pkg/replication/replication_mode_test.go index 038807d7d94..d19a4f70d66 100644 --- a/pkg/replication/replication_mode_test.go +++ b/pkg/replication/replication_mode_test.go @@ -144,7 +144,7 @@ func (rep *mockFileReplicator) GetMembers() ([]*pdpb.Member, error) { return members, nil } -func (rep *mockFileReplicator) ReplicateFileToMember(ctx context.Context, member *pdpb.Member, name string, data []byte) error { +func (rep *mockFileReplicator) ReplicateFileToMember(_ context.Context, member *pdpb.Member, _ string, data []byte) error { if err := rep.errors[member.GetMemberId()]; err != nil { return err } diff --git a/pkg/schedule/checker/merge_checker.go b/pkg/schedule/checker/merge_checker.go index 1ce7bddd1dc..821c21cc119 100644 --- a/pkg/schedule/checker/merge_checker.go +++ b/pkg/schedule/checker/merge_checker.go @@ -94,7 +94,7 @@ func NewMergeChecker(ctx context.Context, cluster sche.CheckerCluster, conf conf } // GetType return MergeChecker's type -func (m *MergeChecker) GetType() string { +func (*MergeChecker) GetType() string { return "merge-checker" } diff --git a/pkg/schedule/checker/replica_checker.go b/pkg/schedule/checker/replica_checker.go index 3e23f3bdcac..6324fd2ca10 100644 --- a/pkg/schedule/checker/replica_checker.go +++ b/pkg/schedule/checker/replica_checker.go @@ -76,7 +76,7 @@ func NewReplicaChecker(cluster sche.CheckerCluster, conf config.CheckerConfigPro } // GetType return ReplicaChecker's type -func (r *ReplicaChecker) GetType() string { +func (*ReplicaChecker) GetType() string { return replicaCheckerName } diff --git a/pkg/schedule/checker/replica_strategy.go b/pkg/schedule/checker/replica_strategy.go index ad85e307bbe..e234189fe96 100644 --- a/pkg/schedule/checker/replica_strategy.go +++ b/pkg/schedule/checker/replica_strategy.go @@ -97,7 +97,7 @@ func (s *ReplicaStrategy) SelectStoreToFix(coLocationStores []*core.StoreInfo, o return 0, false } // trick to avoid creating a slice with `old` removed. - s.swapStoreToFirst(coLocationStores, old) + swapStoreToFirst(coLocationStores, old) // If the coLocationStores only has one store, no need to remove. // Otherwise, the other stores will be filtered. if len(coLocationStores) > 1 { @@ -113,7 +113,7 @@ func (s *ReplicaStrategy) SelectStoreToImprove(coLocationStores []*core.StoreInf return 0, false } // trick to avoid creating a slice with `old` removed. 
- s.swapStoreToFirst(coLocationStores, old) + swapStoreToFirst(coLocationStores, old) oldStore := s.cluster.GetStore(old) if oldStore == nil { return 0, false @@ -127,7 +127,7 @@ func (s *ReplicaStrategy) SelectStoreToImprove(coLocationStores []*core.StoreInf return s.SelectStoreToAdd(coLocationStores[1:], filters...) } -func (s *ReplicaStrategy) swapStoreToFirst(stores []*core.StoreInfo, id uint64) { +func swapStoreToFirst(stores []*core.StoreInfo, id uint64) { for i, s := range stores { if s.GetID() == id { stores[0], stores[i] = stores[i], stores[0] diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go index 464f5e97be8..66b958911b1 100644 --- a/pkg/schedule/checker/rule_checker.go +++ b/pkg/schedule/checker/rule_checker.go @@ -107,7 +107,7 @@ func NewRuleChecker(ctx context.Context, cluster sche.CheckerCluster, ruleManage } // GetType returns RuleChecker's Type -func (c *RuleChecker) GetType() string { +func (*RuleChecker) GetType() string { return ruleCheckerName } @@ -347,7 +347,7 @@ func (c *RuleChecker) fixLooseMatchPeer(region *core.RegionInfo, fit *placement. if region.GetLeader().GetId() != peer.GetId() && rf.Rule.Role == placement.Leader { ruleCheckerFixLeaderRoleCounter.Inc() if c.allowLeader(fit, peer) { - return operator.CreateTransferLeaderOperator("fix-leader-role", c.cluster, region, region.GetLeader().GetStoreId(), peer.GetStoreId(), []uint64{}, 0) + return operator.CreateTransferLeaderOperator("fix-leader-role", c.cluster, region, peer.GetStoreId(), []uint64{}, 0) } ruleCheckerNotAllowLeaderCounter.Inc() return nil, errPeerCannotBeLeader @@ -356,7 +356,7 @@ func (c *RuleChecker) fixLooseMatchPeer(region *core.RegionInfo, fit *placement. ruleCheckerFixFollowerRoleCounter.Inc() for _, p := range region.GetPeers() { if c.allowLeader(fit, p) { - return operator.CreateTransferLeaderOperator("fix-follower-role", c.cluster, region, peer.GetStoreId(), p.GetStoreId(), []uint64{}, 0) + return operator.CreateTransferLeaderOperator("fix-follower-role", c.cluster, region, p.GetStoreId(), []uint64{}, 0) } } ruleCheckerNoNewLeaderCounter.Inc() diff --git a/pkg/schedule/checker/split_checker.go b/pkg/schedule/checker/split_checker.go index 072bdcf7a2e..3a34eee8c90 100644 --- a/pkg/schedule/checker/split_checker.go +++ b/pkg/schedule/checker/split_checker.go @@ -51,7 +51,7 @@ func NewSplitChecker(cluster sche.CheckerCluster, ruleManager *placement.RuleMan } // GetType returns the checker type. 
-func (c *SplitChecker) GetType() string { +func (*SplitChecker) GetType() string { return "split-checker" } diff --git a/pkg/schedule/config/config.go b/pkg/schedule/config/config.go index 56038ddcb09..abf4c776f8a 100644 --- a/pkg/schedule/config/config.go +++ b/pkg/schedule/config/config.go @@ -407,7 +407,7 @@ func (c *ScheduleConfig) Adjust(meta *configutil.ConfigMetaData, reloading bool) adjustSchedulers(&c.Schedulers, DefaultSchedulers) for k, b := range c.migrateConfigurationMap() { - v, err := c.parseDeprecatedFlag(meta, k, *b[0], *b[1]) + v, err := parseDeprecatedFlag(meta, k, *b[0], *b[1]) if err != nil { return err } @@ -456,7 +456,7 @@ func (c *ScheduleConfig) GetMaxMergeRegionKeys() uint64 { return c.MaxMergeRegionSize * 10000 } -func (c *ScheduleConfig) parseDeprecatedFlag(meta *configutil.ConfigMetaData, name string, old, new bool) (bool, error) { +func parseDeprecatedFlag(meta *configutil.ConfigMetaData, name string, old, new bool) (bool, error) { oldName, newName := "disable-"+name, "enable-"+name defineOld, defineNew := meta.IsDefined(oldName), meta.IsDefined(newName) switch { diff --git a/pkg/schedule/filter/candidates_test.go b/pkg/schedule/filter/candidates_test.go index 13e8ed661cc..0d805312ba7 100644 --- a/pkg/schedule/filter/candidates_test.go +++ b/pkg/schedule/filter/candidates_test.go @@ -48,9 +48,9 @@ func idComparer2(a, b *core.StoreInfo) int { type idFilter func(uint64) bool -func (f idFilter) Scope() string { return "idFilter" } -func (f idFilter) Type() filterType { return filterType(0) } -func (f idFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (idFilter) Scope() string { return "idFilter" } +func (idFilter) Type() filterType { return filterType(0) } +func (f idFilter) Source(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if f(store.GetID()) { return statusOK } @@ -58,7 +58,7 @@ func (f idFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo return statusStoreScoreDisallowed } -func (f idFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f idFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if f(store.GetID()) { return statusOK } diff --git a/pkg/schedule/filter/filters.go b/pkg/schedule/filter/filters.go index 0d188e69180..1838f0104f4 100644 --- a/pkg/schedule/filter/filters.go +++ b/pkg/schedule/filter/filters.go @@ -185,18 +185,18 @@ func (f *excludedFilter) Scope() string { return f.scope } -func (f *excludedFilter) Type() filterType { +func (*excludedFilter) Type() filterType { return excluded } -func (f *excludedFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f *excludedFilter) Source(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if _, ok := f.sources[store.GetID()]; ok { return statusStoreAlreadyHasPeer } return statusOK } -func (f *excludedFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f *excludedFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if _, ok := f.targets[store.GetID()]; ok { return statusStoreAlreadyHasPeer } @@ -215,15 +215,15 @@ func (f *storageThresholdFilter) Scope() string { return f.scope } -func (f *storageThresholdFilter) Type() filterType { +func (*storageThresholdFilter) Type() filterType { return storageThreshold } -func (f *storageThresholdFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) 
*plan.Status { +func (*storageThresholdFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } -func (f *storageThresholdFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (*storageThresholdFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if !store.IsLowSpace(conf.GetLowSpaceRatio()) { return statusOK } @@ -283,11 +283,11 @@ func (f *distinctScoreFilter) Scope() string { return f.scope } -func (f *distinctScoreFilter) Type() filterType { +func (*distinctScoreFilter) Type() filterType { return distinctScore } -func (f *distinctScoreFilter) Source(_ config.SharedConfigProvider, _ *core.StoreInfo) *plan.Status { +func (*distinctScoreFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } @@ -387,7 +387,7 @@ func (f *StoreStateFilter) pauseLeaderTransfer(_ config.SharedConfigProvider, st return statusOK } -func (f *StoreStateFilter) slowStoreEvicted(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) slowStoreEvicted(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if store.EvictedAsSlowStore() { f.Reason = storeStateSlow return statusStoreRejectLeader @@ -583,12 +583,12 @@ func (f labelConstraintFilter) Scope() string { } // Type returns the name of the filter. -func (f labelConstraintFilter) Type() filterType { +func (labelConstraintFilter) Type() filterType { return labelConstraint } // Source filters stores when select them as schedule source. -func (f labelConstraintFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f labelConstraintFilter) Source(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if placement.MatchLabelConstraints(store, f.constraints) { return statusOK } @@ -634,11 +634,11 @@ func (f *ruleFitFilter) Scope() string { return f.scope } -func (f *ruleFitFilter) Type() filterType { +func (*ruleFitFilter) Type() filterType { return ruleFit } -func (f *ruleFitFilter) Source(_ config.SharedConfigProvider, _ *core.StoreInfo) *plan.Status { +func (*ruleFitFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } @@ -687,11 +687,11 @@ func (f *ruleLeaderFitFilter) Scope() string { return f.scope } -func (f *ruleLeaderFitFilter) Type() filterType { +func (*ruleLeaderFitFilter) Type() filterType { return ruleLeader } -func (f *ruleLeaderFitFilter) Source(_ config.SharedConfigProvider, _ *core.StoreInfo) *plan.Status { +func (*ruleLeaderFitFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } @@ -743,11 +743,11 @@ func (f *ruleWitnessFitFilter) Scope() string { return f.scope } -func (f *ruleWitnessFitFilter) Type() filterType { +func (*ruleWitnessFitFilter) Type() filterType { return ruleFit } -func (f *ruleWitnessFitFilter) Source(_ config.SharedConfigProvider, _ *core.StoreInfo) *plan.Status { +func (*ruleWitnessFitFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } @@ -815,7 +815,7 @@ func (f *engineFilter) Scope() string { return f.scope } -func (f *engineFilter) Type() filterType { +func (*engineFilter) Type() filterType { return engine } @@ -858,7 +858,7 @@ func (f *specialUseFilter) Scope() string { return f.scope } -func (f *specialUseFilter) Type() filterType { +func (*specialUseFilter) Type() filterType { return specialUse } @@ -869,7 +869,7 @@ func (f *specialUseFilter) 
Source(conf config.SharedConfigProvider, store *core. return statusStoreNotMatchRule } -func (f *specialUseFilter) Target(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (f *specialUseFilter) Target(_ config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { if !f.constraint.MatchStore(store) { return statusOK } @@ -932,11 +932,11 @@ func (f *isolationFilter) Scope() string { return f.scope } -func (f *isolationFilter) Type() filterType { +func (*isolationFilter) Type() filterType { return isolation } -func (f *isolationFilter) Source(conf config.SharedConfigProvider, store *core.StoreInfo) *plan.Status { +func (*isolationFilter) Source(config.SharedConfigProvider, *core.StoreInfo) *plan.Status { return statusOK } diff --git a/pkg/schedule/filter/region_filters.go b/pkg/schedule/filter/region_filters.go index 799cee7d90c..7cd015412c2 100644 --- a/pkg/schedule/filter/region_filters.go +++ b/pkg/schedule/filter/region_filters.go @@ -76,7 +76,7 @@ func NewRegionPendingFilter() RegionFilter { return ®ionPendingFilter{} } -func (f *regionPendingFilter) Select(region *core.RegionInfo) *plan.Status { +func (*regionPendingFilter) Select(region *core.RegionInfo) *plan.Status { if hasPendingPeers(region) { return statusRegionPendingPeer } @@ -91,7 +91,7 @@ func NewRegionDownFilter() RegionFilter { return ®ionDownFilter{} } -func (f *regionDownFilter) Select(region *core.RegionInfo) *plan.Status { +func (*regionDownFilter) Select(region *core.RegionInfo) *plan.Status { if hasDownPeers(region) { return statusRegionDownPeer } diff --git a/pkg/schedule/handler/handler.go b/pkg/schedule/handler/handler.go index a9b89e4e3a4..0541a2d6567 100644 --- a/pkg/schedule/handler/handler.go +++ b/pkg/schedule/handler/handler.go @@ -417,7 +417,7 @@ func (h *Handler) AddTransferLeaderOperator(regionID uint64, storeID uint64) err return errors.Errorf("region has no voter in store %v", storeID) } - op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region, region.GetLeader().GetStoreId(), newLeader.GetStoreId(), []uint64{}, operator.OpAdmin) + op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region, newLeader.GetStoreId(), []uint64{}, operator.OpAdmin) if err != nil { log.Debug("fail to create transfer leader operator", errs.ZapError(err)) return err @@ -1157,7 +1157,7 @@ func (h *Handler) AccelerateRegionsScheduleInRanges(startKeys [][]byte, endKeys } // AdjustLimit adjusts the limit of regions to schedule. -func (h *Handler) AdjustLimit(limitStr string, defaultLimits ...int) (int, error) { +func (*Handler) AdjustLimit(limitStr string, defaultLimits ...int) (int, error) { limit := defaultRegionLimit if len(defaultLimits) > 0 { limit = defaultLimits[0] @@ -1181,7 +1181,7 @@ type ScatterRegionsResponse struct { } // BuildScatterRegionsResp builds ScatterRegionsResponse. -func (h *Handler) BuildScatterRegionsResp(opsCount int, failures map[uint64]error) *ScatterRegionsResponse { +func (*Handler) BuildScatterRegionsResp(opsCount int, failures map[uint64]error) *ScatterRegionsResponse { // If there existed any operator failed to be added into Operator Controller, add its regions into unProcessedRegions percentage := 100 if len(failures) > 0 { @@ -1217,7 +1217,7 @@ func (h *Handler) ScatterRegionsByRange(rawStartKey, rawEndKey string, group str } // ScatterRegionsByID scatters regions by id. 
-func (h *Handler) ScatterRegionsByID(ids []uint64, group string, retryLimit int, skipStoreLimit bool) (int, map[uint64]error, error) { +func (h *Handler) ScatterRegionsByID(ids []uint64, group string, retryLimit int) (int, map[uint64]error, error) { co := h.GetCoordinator() if co == nil { return 0, nil, errs.ErrNotBootstrapped.GenWithStackByArgs() diff --git a/pkg/schedule/labeler/labeler_test.go b/pkg/schedule/labeler/labeler_test.go index 364f79b7a14..bd51bab7d83 100644 --- a/pkg/schedule/labeler/labeler_test.go +++ b/pkg/schedule/labeler/labeler_test.go @@ -404,7 +404,7 @@ func TestLabelerRuleTTL(t *testing.T) { func checkRuleInMemoryAndStorage(re *require.Assertions, labeler *RegionLabeler, ruleID string, exist bool) { re.Equal(exist, labeler.labelRules[ruleID] != nil) existInStorage := false - labeler.storage.LoadRegionRules(func(k, v string) { + labeler.storage.LoadRegionRules(func(k, _ string) { if k == ruleID { existInStorage = true } diff --git a/pkg/schedule/operator/create_operator.go b/pkg/schedule/operator/create_operator.go index 1c96128ab32..638230e3097 100644 --- a/pkg/schedule/operator/create_operator.go +++ b/pkg/schedule/operator/create_operator.go @@ -78,7 +78,7 @@ func CreateRemovePeerOperator(desc string, ci sche.SharedCluster, kind OpKind, r } // CreateTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store. -func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { +func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck). SetLeader(targetStoreID). SetLeaders(targetStoreIDs). @@ -86,7 +86,7 @@ func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *co } // CreateForceTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store forcible. -func CreateForceTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, sourceStoreID uint64, targetStoreID uint64, kind OpKind) (*Operator, error) { +func CreateForceTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, targetStoreID uint64, kind OpKind) (*Operator, error) { return NewBuilder(desc, ci, region, SkipOriginJointStateCheck, SkipPlacementRulesCheck). SetLeader(targetStoreID). EnableForceTargetLeader(). 
diff --git a/pkg/schedule/operator/create_operator_test.go b/pkg/schedule/operator/create_operator_test.go index 80c6cac4a04..d481334bbcb 100644 --- a/pkg/schedule/operator/create_operator_test.go +++ b/pkg/schedule/operator/create_operator_test.go @@ -423,7 +423,7 @@ func (suite *createOperatorTestSuite) TestCreateTransferLeaderOperator() { } for _, testCase := range testCases { region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: testCase.originPeers}, testCase.originPeers[0]) - op, err := CreateTransferLeaderOperator("test", suite.cluster, region, testCase.originPeers[0].StoreId, testCase.targetLeaderStoreID, []uint64{}, 0) + op, err := CreateTransferLeaderOperator("test", suite.cluster, region, testCase.targetLeaderStoreID, []uint64{}, 0) if testCase.isErr { re.Error(err) diff --git a/pkg/schedule/operator/operator.go b/pkg/schedule/operator/operator.go index b87a050969f..8c0986218bc 100644 --- a/pkg/schedule/operator/operator.go +++ b/pkg/schedule/operator/operator.go @@ -531,6 +531,7 @@ const ( mockBrief = "test" ) +// nolint // NewTestOperator creates a test operator, only used for unit test. func NewTestOperator(regionID uint64, regionEpoch *metapb.RegionEpoch, kind OpKind, steps ...OpStep) *Operator { // OpSteps can not be empty for test. diff --git a/pkg/schedule/operator/operator_controller.go b/pkg/schedule/operator/operator_controller.go index f05c232904f..c02b7ce4663 100644 --- a/pkg/schedule/operator/operator_controller.go +++ b/pkg/schedule/operator/operator_controller.go @@ -222,7 +222,7 @@ func (oc *Controller) checkStaleOperator(op *Operator, step OpStep, region *core return false } -func (oc *Controller) getNextPushOperatorTime(step OpStep, now time.Time) time.Time { +func getNextPushOperatorTime(step OpStep, now time.Time) time.Time { nextTime := slowNotifyInterval switch step.(type) { case TransferLeader, PromoteLearner, ChangePeerV2Enter, ChangePeerV2Leave: @@ -270,7 +270,7 @@ func (oc *Controller) pollNeedDispatchRegion() (r *core.RegionInfo, next bool) { } // pushes with new notify time. 
- item.time = oc.getNextPushOperatorTime(step, now) + item.time = getNextPushOperatorTime(step, now) oc.opNotifierQueue.Push(item) return r, true } @@ -559,7 +559,7 @@ func (oc *Controller) addOperatorInner(op *Operator) bool { } } - oc.opNotifierQueue.Push(&operatorWithTime{op: op, time: oc.getNextPushOperatorTime(step, time.Now())}) + oc.opNotifierQueue.Push(&operatorWithTime{op: op, time: getNextPushOperatorTime(step, time.Now())}) operatorCounter.WithLabelValues(op.Desc(), "create").Inc() for _, counter := range op.Counters { counter.Inc() diff --git a/pkg/schedule/operator/operator_controller_test.go b/pkg/schedule/operator/operator_controller_test.go index f2f2b7305ce..d3c50667fe0 100644 --- a/pkg/schedule/operator/operator_controller_test.go +++ b/pkg/schedule/operator/operator_controller_test.go @@ -108,7 +108,7 @@ func (suite *operatorControllerTestSuite) TestGetOpInfluence() { re.True(op2.Start()) oc.SetOperator(op2) go func(ctx context.Context) { - suite.checkRemoveOperatorSuccess(re, oc, op1) + checkRemoveOperatorSuccess(re, oc, op1) for { select { case <-ctx.Done(): @@ -550,7 +550,7 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { for i := uint64(1); i <= 5; i++ { op := NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: i}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } op := NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: 1}) re.False(oc.AddOperator(op)) @@ -560,13 +560,13 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { for i := uint64(1); i <= 10; i++ { op = NewTestOperator(i, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: i}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } tc.SetAllStoresLimit(storelimit.AddPeer, 60) for i := uint64(1); i <= 5; i++ { op = NewTestOperator(i, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: i}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } op = NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, AddPeer{ToStore: 2, PeerID: 1}) re.False(oc.AddOperator(op)) @@ -576,7 +576,7 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { for i := uint64(1); i <= 5; i++ { op := NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } op = NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.False(oc.AddOperator(op)) @@ -586,13 +586,13 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { for i := uint64(1); i <= 10; i++ { op = NewTestOperator(i, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } tc.SetAllStoresLimit(storelimit.RemovePeer, 60) for i := uint64(1); i <= 5; i++ { op = NewTestOperator(i, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.True(oc.AddOperator(op)) - suite.checkRemoveOperatorSuccess(re, oc, op) + checkRemoveOperatorSuccess(re, oc, op) } op = NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion, RemovePeer{FromStore: 2}) re.False(oc.AddOperator(op)) @@ -860,7 +860,7 @@ func newRegionInfo(id uint64, startKey, endKey string, size, keys int64, leader ) } -func (suite 
*operatorControllerTestSuite) checkRemoveOperatorSuccess(re *require.Assertions, oc *Controller, op *Operator) { +func checkRemoveOperatorSuccess(re *require.Assertions, oc *Controller, op *Operator) { re.True(oc.RemoveOperator(op)) re.True(op.IsEnd()) re.Equal(op, oc.GetOperatorStatus(op.RegionID()).Operator) diff --git a/pkg/schedule/operator/operator_test.go b/pkg/schedule/operator/operator_test.go index 4719df9408b..809430caeb2 100644 --- a/pkg/schedule/operator/operator_test.go +++ b/pkg/schedule/operator/operator_test.go @@ -65,7 +65,7 @@ func (suite *operatorTestSuite) TearDownTest() { suite.cancel() } -func (suite *operatorTestSuite) newTestRegion(regionID uint64, leaderPeer uint64, peers ...[2]uint64) *core.RegionInfo { +func newTestRegion(regionID uint64, leaderPeer uint64, peers ...[2]uint64) *core.RegionInfo { var ( region metapb.Region leader *metapb.Peer @@ -87,7 +87,7 @@ func (suite *operatorTestSuite) newTestRegion(regionID uint64, leaderPeer uint64 func (suite *operatorTestSuite) TestOperatorStep() { re := suite.Require() - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) re.False(TransferLeader{FromStore: 1, ToStore: 2}.IsFinish(region)) re.True(TransferLeader{FromStore: 2, ToStore: 1}.IsFinish(region)) re.False(AddPeer{ToStore: 3, PeerID: 3}.IsFinish(region)) @@ -96,11 +96,12 @@ func (suite *operatorTestSuite) TestOperatorStep() { re.True(RemovePeer{FromStore: 3}.IsFinish(region)) } -func (suite *operatorTestSuite) newTestOperator(regionID uint64, kind OpKind, steps ...OpStep) *Operator { +// nolint +func newTestOperator(regionID uint64, kind OpKind, steps ...OpStep) *Operator { return NewTestOperator(regionID, &metapb.RegionEpoch{}, kind, steps...) } -func (suite *operatorTestSuite) checkSteps(re *require.Assertions, op *Operator, steps []OpStep) { +func checkSteps(re *require.Assertions, op *Operator, steps []OpStep) { re.Len(steps, op.Len()) for i := range steps { re.Equal(steps[i], op.Step(i)) @@ -109,16 +110,16 @@ func (suite *operatorTestSuite) checkSteps(re *require.Assertions, op *Operator, func (suite *operatorTestSuite) TestOperator() { re := suite.Require() - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) // addPeer1, transferLeader1, removePeer3 steps := []OpStep{ AddPeer{ToStore: 1, PeerID: 1}, TransferLeader{FromStore: 3, ToStore: 1}, RemovePeer{FromStore: 3}, } - op := suite.newTestOperator(1, OpAdmin|OpLeader|OpRegion, steps...) + op := newTestOperator(1, OpAdmin|OpLeader|OpRegion, steps...) re.Equal(constant.Urgent, op.GetPriorityLevel()) - suite.checkSteps(re, op, steps) + checkSteps(re, op, steps) op.Start() re.Nil(op.Check(region)) @@ -132,9 +133,9 @@ func (suite *operatorTestSuite) TestOperator() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op = suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op = newTestOperator(1, OpLeader|OpRegion, steps...) re.Equal(constant.Medium, op.GetPriorityLevel()) - suite.checkSteps(re, op, steps) + checkSteps(re, op, steps) op.Start() re.Equal(RemovePeer{FromStore: 2}, op.Check(region)) re.Equal(int32(2), atomic.LoadInt32(&op.currentStep)) @@ -149,7 +150,7 @@ func (suite *operatorTestSuite) TestOperator() { // check short timeout for transfer leader only operators. steps = []OpStep{TransferLeader{FromStore: 2, ToStore: 1}} - op = suite.newTestOperator(1, OpLeader, steps...) 
+ op = newTestOperator(1, OpLeader, steps...) op.Start() re.False(op.CheckTimeout()) op.SetStatusReachTime(STARTED, op.GetStartTime().Add(-FastStepWaitTime-time.Second)) @@ -166,7 +167,7 @@ func (suite *operatorTestSuite) TestOperator() { func (suite *operatorTestSuite) TestInfluence() { re := suite.Require() - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) opInfluence := OpInfluence{StoresInfluence: make(map[uint64]*StoreInfluence)} storeOpInfluence := opInfluence.StoresInfluence storeOpInfluence[1] = &StoreInfluence{} @@ -309,7 +310,7 @@ func (suite *operatorTestSuite) TestCheckSuccess() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := newTestOperator(1, OpLeader|OpRegion, steps...) re.Equal(CREATED, op.Status()) re.False(op.CheckSuccess()) re.True(op.Start()) @@ -324,7 +325,7 @@ func (suite *operatorTestSuite) TestCheckSuccess() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := newTestOperator(1, OpLeader|OpRegion, steps...) op.currentStep = int32(len(op.steps)) re.Equal(CREATED, op.Status()) re.False(op.CheckSuccess()) @@ -342,7 +343,7 @@ func (suite *operatorTestSuite) TestCheckTimeout() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := newTestOperator(1, OpLeader|OpRegion, steps...) re.Equal(CREATED, op.Status()) re.True(op.Start()) op.currentStep = int32(len(op.steps)) @@ -355,7 +356,7 @@ func (suite *operatorTestSuite) TestCheckTimeout() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := newTestOperator(1, OpLeader|OpRegion, steps...) re.Equal(CREATED, op.Status()) re.True(op.Start()) op.currentStep = int32(len(op.steps)) @@ -372,7 +373,7 @@ func (suite *operatorTestSuite) TestStart() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := newTestOperator(1, OpLeader|OpRegion, steps...) re.Equal(0, op.GetStartTime().Nanosecond()) re.Equal(CREATED, op.Status()) re.True(op.Start()) @@ -387,7 +388,7 @@ func (suite *operatorTestSuite) TestCheckExpired() { TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := newTestOperator(1, OpLeader|OpRegion, steps...) re.False(op.CheckExpired()) re.Equal(CREATED, op.Status()) op.SetStatusReachTime(CREATED, time.Now().Add(-OperatorExpireTime)) @@ -398,30 +399,30 @@ func (suite *operatorTestSuite) TestCheckExpired() { func (suite *operatorTestSuite) TestCheck() { re := suite.Require() { - region := suite.newTestRegion(2, 2, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(2, 2, [2]uint64{1, 1}, [2]uint64{2, 2}) steps := []OpStep{ AddPeer{ToStore: 1, PeerID: 1}, TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(2, OpLeader|OpRegion, steps...) + op := newTestOperator(2, OpLeader|OpRegion, steps...) 
re.True(op.Start()) re.NotNil(op.Check(region)) re.Equal(STARTED, op.Status()) - region = suite.newTestRegion(1, 1, [2]uint64{1, 1}) + region = newTestRegion(1, 1, [2]uint64{1, 1}) re.Nil(op.Check(region)) re.Equal(SUCCESS, op.Status()) } { - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) steps := []OpStep{ AddPeer{ToStore: 1, PeerID: 1}, TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := newTestOperator(1, OpLeader|OpRegion, steps...) re.True(op.Start()) re.NotNil(op.Check(region)) re.Equal(STARTED, op.Status()) @@ -430,18 +431,18 @@ func (suite *operatorTestSuite) TestCheck() { re.Equal(TIMEOUT, op.Status()) } { - region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) steps := []OpStep{ AddPeer{ToStore: 1, PeerID: 1}, TransferLeader{FromStore: 2, ToStore: 1}, RemovePeer{FromStore: 2}, } - op := suite.newTestOperator(1, OpLeader|OpRegion, steps...) + op := newTestOperator(1, OpLeader|OpRegion, steps...) re.True(op.Start()) re.NotNil(op.Check(region)) re.Equal(STARTED, op.Status()) op.status.setTime(STARTED, time.Now().Add(-SlowStepWaitTime)) - region = suite.newTestRegion(1, 1, [2]uint64{1, 1}) + region = newTestRegion(1, 1, [2]uint64{1, 1}) re.Nil(op.Check(region)) re.Equal(SUCCESS, op.Status()) } @@ -454,28 +455,28 @@ func (suite *operatorTestSuite) TestSchedulerKind() { expect OpKind }{ { - op: suite.newTestOperator(1, OpAdmin|OpMerge|OpRegion), + op: newTestOperator(1, OpAdmin|OpMerge|OpRegion), expect: OpAdmin, }, { - op: suite.newTestOperator(1, OpMerge|OpLeader|OpRegion), + op: newTestOperator(1, OpMerge|OpLeader|OpRegion), expect: OpMerge, }, { - op: suite.newTestOperator(1, OpReplica|OpRegion), + op: newTestOperator(1, OpReplica|OpRegion), expect: OpReplica, }, { - op: suite.newTestOperator(1, OpSplit|OpRegion), + op: newTestOperator(1, OpSplit|OpRegion), expect: OpSplit, }, { - op: suite.newTestOperator(1, OpRange|OpRegion), + op: newTestOperator(1, OpRange|OpRegion), expect: OpRange, }, { - op: suite.newTestOperator(1, OpHotRegion|OpLeader|OpRegion), + op: newTestOperator(1, OpHotRegion|OpLeader|OpRegion), expect: OpHotRegion, }, { - op: suite.newTestOperator(1, OpRegion|OpLeader), + op: newTestOperator(1, OpRegion|OpLeader), expect: OpRegion, }, { - op: suite.newTestOperator(1, OpLeader), + op: newTestOperator(1, OpLeader), expect: OpLeader, }, } @@ -534,7 +535,7 @@ func (suite *operatorTestSuite) TestOpStepTimeout() { func (suite *operatorTestSuite) TestRecord() { re := suite.Require() - operator := suite.newTestOperator(1, OpLeader, AddLearner{ToStore: 1, PeerID: 1}, RemovePeer{FromStore: 1, PeerID: 1}) + operator := newTestOperator(1, OpLeader, AddLearner{ToStore: 1, PeerID: 1}, RemovePeer{FromStore: 1, PeerID: 1}) now := time.Now() time.Sleep(time.Second) ob := operator.Record(now) @@ -548,7 +549,7 @@ func (suite *operatorTestSuite) TestToJSONObject() { TransferLeader{FromStore: 3, ToStore: 1}, RemovePeer{FromStore: 3}, } - op := suite.newTestOperator(101, OpLeader|OpRegion, steps...) + op := newTestOperator(101, OpLeader|OpRegion, steps...) op.Start() obj := op.ToJSONObject() suite.Equal("test", obj.Desc) @@ -559,7 +560,7 @@ func (suite *operatorTestSuite) TestToJSONObject() { suite.Equal(STARTED, obj.Status) // Test SUCCESS status. 
- region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + region := newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) suite.Nil(op.Check(region)) suite.Equal(SUCCESS, op.Status()) obj = op.ToJSONObject() @@ -567,7 +568,7 @@ func (suite *operatorTestSuite) TestToJSONObject() { // Test TIMEOUT status. steps = []OpStep{TransferLeader{FromStore: 2, ToStore: 1}} - op = suite.newTestOperator(1, OpLeader, steps...) + op = newTestOperator(1, OpLeader, steps...) op.Start() op.SetStatusReachTime(STARTED, op.GetStartTime().Add(-FastStepWaitTime-time.Second)) suite.True(op.CheckTimeout()) diff --git a/pkg/schedule/operator/status_tracker.go b/pkg/schedule/operator/status_tracker.go index e103a74ccb3..0ba8135750c 100644 --- a/pkg/schedule/operator/status_tracker.go +++ b/pkg/schedule/operator/status_tracker.go @@ -64,9 +64,8 @@ func (trk *OpStatusTracker) getTime(s OpStatus) time.Time { return trk.reachTimes[s] } else if trk.current == s { return trk.reachTimes[firstEndStatus] - } else { - return time.Time{} } + return time.Time{} } // To transfer the current status to dst if this transition is valid, diff --git a/pkg/schedule/operator/step.go b/pkg/schedule/operator/step.go index 6f14cbb326b..04e41028865 100644 --- a/pkg/schedule/operator/step.go +++ b/pkg/schedule/operator/step.go @@ -70,7 +70,7 @@ type TransferLeader struct { } // ConfVerChanged returns the delta value for version increased by this step. -func (tl TransferLeader) ConfVerChanged(_ *core.RegionInfo) uint64 { +func (TransferLeader) ConfVerChanged(_ *core.RegionInfo) uint64 { return 0 // transfer leader never change the conf version } @@ -122,12 +122,12 @@ func (tl TransferLeader) Influence(opInfluence OpInfluence, region *core.RegionI } // Timeout returns duration that current step may take. -func (tl TransferLeader) Timeout(regionSize int64) time.Duration { +func (TransferLeader) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } // GetCmd returns the schedule command for heartbeat response. -func (tl TransferLeader) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (tl TransferLeader) GetCmd(region *core.RegionInfo, _ bool) *hbstream.Operation { peers := make([]*metapb.Peer, 0, len(tl.ToStores)) for _, storeID := range tl.ToStores { peers = append(peers, region.GetStorePeer(storeID)) @@ -206,7 +206,7 @@ func (ap AddPeer) CheckInProgress(ci *core.BasicCluster, config config.SharedCon } // Timeout returns duration that current step may take. -func (ap AddPeer) Timeout(regionSize int64) time.Duration { +func (AddPeer) Timeout(regionSize int64) time.Duration { return slowStepWaitDuration(regionSize) } @@ -270,7 +270,7 @@ func (bw BecomeWitness) Influence(opInfluence OpInfluence, region *core.RegionIn } // Timeout returns duration that current step may take. -func (bw BecomeWitness) Timeout(regionSize int64) time.Duration { +func (BecomeWitness) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } @@ -338,12 +338,12 @@ func (bn BecomeNonWitness) Influence(opInfluence OpInfluence, region *core.Regio } // Timeout returns duration that current step may take. -func (bn BecomeNonWitness) Timeout(regionSize int64) time.Duration { +func (BecomeNonWitness) Timeout(regionSize int64) time.Duration { return slowStepWaitDuration(regionSize) } // GetCmd returns the schedule command for heartbeat response. 
-func (bn BecomeNonWitness) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (bn BecomeNonWitness) GetCmd(*core.RegionInfo, bool) *hbstream.Operation { return switchWitness(bn.PeerID, false) } @@ -518,7 +518,7 @@ func (al AddLearner) Influence(opInfluence OpInfluence, region *core.RegionInfo) } // Timeout returns duration that current step may take. -func (al AddLearner) Timeout(regionSize int64) time.Duration { +func (AddLearner) Timeout(regionSize int64) time.Duration { return slowStepWaitDuration(regionSize) } @@ -565,7 +565,7 @@ func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (pl PromoteLearner) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, region *core.RegionInfo) error { +func (pl PromoteLearner) CheckInProgress(_ *core.BasicCluster, _ config.SharedConfigProvider, region *core.RegionInfo) error { peer := region.GetStorePeer(pl.ToStore) if peer.GetId() != pl.PeerID { return errors.New("peer does not exist") @@ -574,10 +574,10 @@ func (pl PromoteLearner) CheckInProgress(_ *core.BasicCluster, config config.Sha } // Influence calculates the store difference that current step makes. -func (pl PromoteLearner) Influence(_ OpInfluence, _ *core.RegionInfo) {} +func (PromoteLearner) Influence(OpInfluence, *core.RegionInfo) {} // Timeout returns duration that current step may take. -func (pl PromoteLearner) Timeout(regionSize int64) time.Duration { +func (PromoteLearner) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } @@ -617,7 +617,7 @@ func (rp RemovePeer) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (rp RemovePeer) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, region *core.RegionInfo) error { +func (rp RemovePeer) CheckInProgress(_ *core.BasicCluster, _ config.SharedConfigProvider, region *core.RegionInfo) error { if rp.FromStore == region.GetLeader().GetStoreId() { return errors.New("cannot remove leader peer") } @@ -648,7 +648,7 @@ func (rp RemovePeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) } // Timeout returns duration that current step may take. -func (rp RemovePeer) Timeout(regionSize int64) time.Duration { +func (RemovePeer) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } @@ -674,7 +674,7 @@ type MergeRegion struct { } // ConfVerChanged returns the delta value for version increased by this step. -func (mr MergeRegion) ConfVerChanged(_ *core.RegionInfo) uint64 { +func (MergeRegion) ConfVerChanged(*core.RegionInfo) uint64 { return 0 } @@ -691,7 +691,7 @@ func (mr MergeRegion) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (mr MergeRegion) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, _ *core.RegionInfo) error { +func (MergeRegion) CheckInProgress(*core.BasicCluster, config.SharedConfigProvider, *core.RegionInfo) error { return nil } @@ -710,12 +710,12 @@ func (mr MergeRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo // Timeout returns duration that current step may take. // The merge step need more time to finish but less than slow step. 
-func (mr MergeRegion) Timeout(regionSize int64) time.Duration { +func (MergeRegion) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) * 10 } // GetCmd returns the schedule command for heartbeat response. -func (mr MergeRegion) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (mr MergeRegion) GetCmd(*core.RegionInfo, bool) *hbstream.Operation { if mr.IsPassive { return nil } @@ -734,7 +734,7 @@ type SplitRegion struct { } // ConfVerChanged returns the delta value for version increased by this step. -func (sr SplitRegion) ConfVerChanged(_ *core.RegionInfo) uint64 { +func (SplitRegion) ConfVerChanged(*core.RegionInfo) uint64 { return 0 } @@ -748,7 +748,7 @@ func (sr SplitRegion) IsFinish(region *core.RegionInfo) bool { } // Influence calculates the store difference that current step makes. -func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) { +func (SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo) { for _, peer := range region.GetPeers() { inf := opInfluence.GetStoreInfluence(peer.GetStoreId()) inf.RegionCount++ @@ -759,17 +759,17 @@ func (sr SplitRegion) Influence(opInfluence OpInfluence, region *core.RegionInfo } // CheckInProgress checks if the step is in the progress of advancing. -func (sr SplitRegion) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, _ *core.RegionInfo) error { +func (SplitRegion) CheckInProgress(*core.BasicCluster, config.SharedConfigProvider, *core.RegionInfo) error { return nil } // Timeout returns duration that current step may take. -func (sr SplitRegion) Timeout(regionSize int64) time.Duration { +func (SplitRegion) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } // GetCmd returns the schedule command for heartbeat response. -func (sr SplitRegion) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (sr SplitRegion) GetCmd(*core.RegionInfo, bool) *hbstream.Operation { return &hbstream.Operation{ SplitRegion: &pdpb.SplitRegion{ Policy: sr.Policy, @@ -814,7 +814,7 @@ func (dv DemoteVoter) IsFinish(region *core.RegionInfo) bool { } // Timeout returns duration that current step may take. -func (dv DemoteVoter) Timeout(regionSize int64) time.Duration { +func (DemoteVoter) Timeout(regionSize int64) time.Duration { return fastStepWaitDuration(regionSize) } @@ -884,7 +884,7 @@ func (cpe ChangePeerV2Enter) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (cpe ChangePeerV2Enter) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, region *core.RegionInfo) error { +func (cpe ChangePeerV2Enter) CheckInProgress(_ *core.BasicCluster, _ config.SharedConfigProvider, region *core.RegionInfo) error { inJointState, notInJointState := false, false for _, pl := range cpe.PromoteLearners { peer := region.GetStorePeer(pl.ToStore) @@ -932,7 +932,7 @@ func (cpe ChangePeerV2Enter) CheckInProgress(_ *core.BasicCluster, config config } // Influence calculates the store difference that current step makes. -func (cpe ChangePeerV2Enter) Influence(_ OpInfluence, _ *core.RegionInfo) {} +func (ChangePeerV2Enter) Influence(OpInfluence, *core.RegionInfo) {} // Timeout returns duration that current step may take. 
func (cpe ChangePeerV2Enter) Timeout(regionSize int64) time.Duration { @@ -1013,7 +1013,7 @@ func (cpl ChangePeerV2Leave) IsFinish(region *core.RegionInfo) bool { } // CheckInProgress checks if the step is in the progress of advancing. -func (cpl ChangePeerV2Leave) CheckInProgress(_ *core.BasicCluster, config config.SharedConfigProvider, region *core.RegionInfo) error { +func (cpl ChangePeerV2Leave) CheckInProgress(_ *core.BasicCluster, _ config.SharedConfigProvider, region *core.RegionInfo) error { inJointState, notInJointState, demoteLeader := false, false, false leaderStoreID := region.GetLeader().GetStoreId() @@ -1072,7 +1072,7 @@ func (cpl ChangePeerV2Leave) CheckInProgress(_ *core.BasicCluster, config config } // Influence calculates the store difference that current step makes. -func (cpl ChangePeerV2Leave) Influence(_ OpInfluence, _ *core.RegionInfo) {} +func (ChangePeerV2Leave) Influence(OpInfluence, *core.RegionInfo) {} // Timeout returns duration that current step may take. func (cpl ChangePeerV2Leave) Timeout(regionSize int64) time.Duration { @@ -1081,7 +1081,7 @@ func (cpl ChangePeerV2Leave) Timeout(regionSize int64) time.Duration { } // GetCmd returns the schedule command for heartbeat response. -func (cpl ChangePeerV2Leave) GetCmd(region *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { +func (ChangePeerV2Leave) GetCmd(_ *core.RegionInfo, useConfChangeV2 bool) *hbstream.Operation { if !useConfChangeV2 { // only supported in ChangePeerV2 return nil diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index e11e8492765..b4e6feb332c 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -164,7 +164,7 @@ func (handler *balanceLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http handler.rd.JSON(w, httpCode, v) } -func (handler *balanceLeaderHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *balanceLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -219,7 +219,7 @@ func (l *balanceLeaderScheduler) GetName() string { return l.name } -func (l *balanceLeaderScheduler) GetType() string { +func (*balanceLeaderScheduler) GetType() string { return BalanceLeaderType } @@ -553,7 +553,7 @@ func (l *balanceLeaderScheduler) createOperator(solver *solver, collector *plan. 
} solver.Step++ defer func() { solver.Step-- }() - op, err := operator.CreateTransferLeaderOperator(BalanceLeaderType, solver, solver.Region, solver.Region.GetLeader().GetStoreId(), solver.TargetStoreID(), []uint64{}, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator(BalanceLeaderType, solver, solver.Region, solver.TargetStoreID(), []uint64{}, operator.OpLeader) if err != nil { log.Debug("fail to create balance leader operator", errs.ZapError(err)) if collector != nil { diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 1cef3a4615b..98e3be6e08a 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -96,7 +96,7 @@ func (s *balanceRegionScheduler) GetName() string { return s.conf.Name } -func (s *balanceRegionScheduler) GetType() string { +func (*balanceRegionScheduler) GetType() string { return BalanceRegionType } diff --git a/pkg/schedule/schedulers/balance_test.go b/pkg/schedule/schedulers/balance_test.go index 68332d7067e..234acfd6d26 100644 --- a/pkg/schedule/schedulers/balance_test.go +++ b/pkg/schedule/schedulers/balance_test.go @@ -186,7 +186,7 @@ func TestTolerantRatio(t *testing.T) { kind constant.ScheduleKind expectTolerantResource func(constant.ScheduleKind) int64 }{ - {0, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.ByCount}, func(k constant.ScheduleKind) int64 { + {0, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.ByCount}, func(constant.ScheduleKind) int64 { return int64(leaderTolerantSizeRatio) }}, {0, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.BySize}, func(k constant.ScheduleKind) int64 { @@ -198,7 +198,7 @@ func TestTolerantRatio(t *testing.T) { {0, constant.ScheduleKind{Resource: constant.RegionKind, Policy: constant.BySize}, func(k constant.ScheduleKind) int64 { return int64(adjustTolerantRatio(tc, k) * float64(regionSize)) }}, - {10, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.ByCount}, func(k constant.ScheduleKind) int64 { + {10, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.ByCount}, func(constant.ScheduleKind) int64 { return int64(tc.GetScheduleConfig().TolerantSizeRatio) }}, {10, constant.ScheduleKind{Resource: constant.LeaderKind, Policy: constant.BySize}, func(k constant.ScheduleKind) int64 { diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index aee112c9dc1..3c4776c4666 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -150,7 +150,7 @@ func (handler *balanceWitnessHandler) UpdateConfig(w http.ResponseWriter, r *htt handler.rd.JSON(w, httpCode, v) } -func (handler *balanceWitnessHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *balanceWitnessHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -214,7 +214,7 @@ func (b *balanceWitnessScheduler) GetName() string { return b.name } -func (b *balanceWitnessScheduler) GetType() string { +func (*balanceWitnessScheduler) GetType() string { return BalanceWitnessType } diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index f4c8c577767..f3772757ad3 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -68,32 +68,32 @@ func NewBaseScheduler(opController 
*operator.Controller) *BaseScheduler { return &BaseScheduler{OpController: opController} } -func (s *BaseScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (*BaseScheduler) ServeHTTP(w http.ResponseWriter, _ *http.Request) { fmt.Fprintf(w, "not implements") } // GetMinInterval returns the minimal interval for the scheduler -func (s *BaseScheduler) GetMinInterval() time.Duration { +func (*BaseScheduler) GetMinInterval() time.Duration { return MinScheduleInterval } // EncodeConfig encode config for the scheduler -func (s *BaseScheduler) EncodeConfig() ([]byte, error) { +func (*BaseScheduler) EncodeConfig() ([]byte, error) { return EncodeConfig(nil) } // ReloadConfig reloads the config from the storage. // By default, the scheduler does not need to reload the config // if it doesn't support the dynamic configuration. -func (s *BaseScheduler) ReloadConfig() error { return nil } +func (*BaseScheduler) ReloadConfig() error { return nil } // GetNextInterval return the next interval for the scheduler -func (s *BaseScheduler) GetNextInterval(interval time.Duration) time.Duration { +func (*BaseScheduler) GetNextInterval(interval time.Duration) time.Duration { return intervalGrow(interval, MaxScheduleInterval, exponentialGrowth) } // PrepareConfig does some prepare work about config. -func (s *BaseScheduler) PrepareConfig(cluster sche.SchedulerCluster) error { return nil } +func (*BaseScheduler) PrepareConfig(sche.SchedulerCluster) error { return nil } // CleanConfig does some cleanup work about config. -func (s *BaseScheduler) CleanConfig(cluster sche.SchedulerCluster) {} +func (*BaseScheduler) CleanConfig(sche.SchedulerCluster) {} diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 5cd59583767..3750834a82d 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -118,7 +118,7 @@ func (conf *evictLeaderSchedulerConfig) Persist() error { return conf.storage.SaveSchedulerConfig(name, data) } -func (conf *evictLeaderSchedulerConfig) getSchedulerName() string { +func (*evictLeaderSchedulerConfig) getSchedulerName() string { return EvictLeaderName } @@ -190,11 +190,11 @@ func (s *evictLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) s.handler.ServeHTTP(w, r) } -func (s *evictLeaderScheduler) GetName() string { +func (*evictLeaderScheduler) GetName() string { return EvictLeaderName } -func (s *evictLeaderScheduler) GetType() string { +func (*evictLeaderScheduler) GetType() string { return EvictLeaderType } @@ -251,7 +251,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } -func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { evictLeaderCounter.Inc() return scheduleEvictLeaderBatch(s.GetName(), s.GetType(), cluster, s.conf, EvictLeaderBatchSize), nil } @@ -338,7 +338,7 @@ func scheduleEvictLeaderOnce(name, typ string, cluster sche.SchedulerCluster, co for _, t := range targets { targetIDs = append(targetIDs, t.GetID()) } - op, err := operator.CreateTransferLeaderOperator(typ, cluster, region, region.GetLeader().GetStoreId(), target.GetID(), targetIDs, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator(typ, cluster, region, target.GetID(), targetIDs, operator.OpLeader) if err != nil { log.Debug("fail to create evict leader 
operator", errs.ZapError(err)) continue @@ -395,7 +395,7 @@ func (handler *evictLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.") } -func (handler *evictLeaderHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index aa48d0bc9e9..ab30b256823 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -177,7 +177,7 @@ func (handler *evictSlowStoreHandler) UpdateConfig(w http.ResponseWriter, r *htt handler.rd.JSON(w, http.StatusOK, "Config updated.") } -func (handler *evictSlowStoreHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictSlowStoreHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -192,11 +192,11 @@ func (s *evictSlowStoreScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reque s.handler.ServeHTTP(w, r) } -func (s *evictSlowStoreScheduler) GetName() string { +func (*evictSlowStoreScheduler) GetName() string { return EvictSlowStoreName } -func (s *evictSlowStoreScheduler) GetType() string { +func (*evictSlowStoreScheduler) GetType() string { return EvictSlowStoreType } @@ -280,7 +280,7 @@ func (s *evictSlowStoreScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste return true } -func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { evictSlowStoreCounter.Inc() var ops []*operator.Operator diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index d919c1c0f0a..da3dbc24e95 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -263,7 +263,7 @@ func (handler *evictSlowTrendHandler) UpdateConfig(w http.ResponseWriter, r *htt handler.rd.JSON(w, http.StatusOK, "Config updated.") } -func (handler *evictSlowTrendHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *evictSlowTrendHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } @@ -274,7 +274,7 @@ type evictSlowTrendScheduler struct { handler http.Handler } -func (s *evictSlowTrendScheduler) GetNextInterval(interval time.Duration) time.Duration { +func (s *evictSlowTrendScheduler) GetNextInterval(time.Duration) time.Duration { var growthType intervalGrowthType // If it already found a slow node as candidate, the next interval should be shorter // to make the next scheduling as soon as possible. 
This adjustment will decrease the @@ -291,11 +291,11 @@ func (s *evictSlowTrendScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reque s.handler.ServeHTTP(w, r) } -func (s *evictSlowTrendScheduler) GetName() string { +func (*evictSlowTrendScheduler) GetName() string { return EvictSlowTrendName } -func (s *evictSlowTrendScheduler) GetType() string { +func (*evictSlowTrendScheduler) GetType() string { return EvictSlowTrendType } @@ -384,7 +384,7 @@ func (s *evictSlowTrendScheduler) IsScheduleAllowed(cluster sche.SchedulerCluste return allowed } -func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { schedulerCounter.WithLabelValues(s.GetName(), "schedule").Inc() var ops []*operator.Operator diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index 262dfe73873..56ed7cd730e 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -108,7 +108,7 @@ func (conf *grantHotRegionSchedulerConfig) Persist() error { return conf.storage.SaveSchedulerConfig(name, data) } -func (conf *grantHotRegionSchedulerConfig) getSchedulerName() string { +func (*grantHotRegionSchedulerConfig) getSchedulerName() string { return GrantHotRegionName } @@ -148,11 +148,11 @@ func newGrantHotRegionScheduler(opController *operator.Controller, conf *grantHo return ret } -func (s *grantHotRegionScheduler) GetName() string { +func (*grantHotRegionScheduler) GetName() string { return GrantHotRegionName } -func (s *grantHotRegionScheduler) GetType() string { +func (*grantHotRegionScheduler) GetType() string { return GrantHotRegionType } @@ -256,7 +256,7 @@ func newGrantHotRegionHandler(config *grantHotRegionSchedulerConfig) http.Handle return router } -func (s *grantHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *grantHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { grantHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) @@ -352,7 +352,7 @@ func (s *grantHotRegionScheduler) transfer(cluster sche.SchedulerCluster, region dstStore := &metapb.Peer{StoreId: destStoreIDs[i]} if isLeader { - op, err = operator.CreateTransferLeaderOperator(GrantHotRegionType+"-leader", cluster, srcRegion, srcRegion.GetLeader().GetStoreId(), dstStore.StoreId, []uint64{}, operator.OpLeader) + op, err = operator.CreateTransferLeaderOperator(GrantHotRegionType+"-leader", cluster, srcRegion, dstStore.StoreId, []uint64{}, operator.OpLeader) } else { op, err = operator.CreateMovePeerOperator(GrantHotRegionType+"-move", cluster, srcRegion, operator.OpRegion|operator.OpLeader, srcStore.GetID(), dstStore) } diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 8d36a5ae1c3..5de898489d9 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -98,7 +98,7 @@ func (conf *grantLeaderSchedulerConfig) Persist() error { return conf.storage.SaveSchedulerConfig(name, data) } -func (conf *grantLeaderSchedulerConfig) getSchedulerName() string { +func (*grantLeaderSchedulerConfig) getSchedulerName() string { return GrantLeaderName } @@ -176,11 +176,11 @@ func (s *grantLeaderScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) 
s.handler.ServeHTTP(w, r) } -func (s *grantLeaderScheduler) GetName() string { +func (*grantLeaderScheduler) GetName() string { return GrantLeaderName } -func (s *grantLeaderScheduler) GetType() string { +func (*grantLeaderScheduler) GetType() string { return GrantLeaderType } @@ -235,7 +235,7 @@ func (s *grantLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } -func (s *grantLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *grantLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { grantLeaderCounter.Inc() storeIDWithRanges := s.conf.getStoreIDWithRanges() ops := make([]*operator.Operator, 0, len(storeIDWithRanges)) @@ -248,7 +248,7 @@ func (s *grantLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bo continue } - op, err := operator.CreateForceTransferLeaderOperator(GrantLeaderType, cluster, region, region.GetLeader().GetStoreId(), id, operator.OpLeader) + op, err := operator.CreateForceTransferLeaderOperator(GrantLeaderType, cluster, region, id, operator.OpLeader) if err != nil { log.Debug("fail to create grant leader operator", errs.ZapError(err)) continue @@ -306,7 +306,7 @@ func (handler *grantLeaderHandler) UpdateConfig(w http.ResponseWriter, r *http.R handler.rd.JSON(w, http.StatusOK, "The scheduler has been applied to the store.") } -func (handler *grantLeaderHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *grantLeaderHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index 2a38ef399c8..b6293c2dac9 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -254,7 +254,7 @@ func (h *hotScheduler) GetName() string { return h.name } -func (h *hotScheduler) GetType() string { +func (*hotScheduler) GetType() string { return HotRegionType } @@ -306,11 +306,11 @@ func (h *hotScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.conf.ServeHTTP(w, r) } -func (h *hotScheduler) GetMinInterval() time.Duration { +func (*hotScheduler) GetMinInterval() time.Duration { return minHotScheduleInterval } -func (h *hotScheduler) GetNextInterval(interval time.Duration) time.Duration { +func (h *hotScheduler) GetNextInterval(time.Duration) time.Duration { return intervalGrow(h.GetMinInterval(), maxHotScheduleInterval, exponentialGrowth) } @@ -322,7 +322,7 @@ func (h *hotScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { return allowed } -func (h *hotScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (h *hotScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { hotSchedulerCounter.Inc() rw := h.randomRWType() return h.dispatch(rw, cluster), nil @@ -1193,7 +1193,7 @@ func (bs *balanceSolver) checkHistoryByPriorityAndToleranceAnyOf(loads [][]float }) } -func (bs *balanceSolver) checkByPriorityAndToleranceFirstOnly(loads []float64, f func(int) bool) bool { +func (bs *balanceSolver) checkByPriorityAndToleranceFirstOnly(_ []float64, f func(int) bool) bool { return f(bs.firstPriority) } @@ -1732,7 +1732,6 @@ func (bs *balanceSolver) createReadOperator(region *core.RegionInfo, srcStoreID, "transfer-hot-read-leader", bs, region, - srcStoreID, dstStoreID, []uint64{}, operator.OpHotRegion) @@ 
-1769,7 +1768,6 @@ func (bs *balanceSolver) createWriteOperator(region *core.RegionInfo, srcStoreID "transfer-hot-write-leader", bs, region, - srcStoreID, dstStoreID, []uint64{}, operator.OpHotRegion) diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go index b336438830b..80d20ca65bb 100644 --- a/pkg/schedule/schedulers/hot_region_config.go +++ b/pkg/schedule/schedulers/hot_region_config.go @@ -375,7 +375,7 @@ func (conf *hotRegionSchedulerConfig) ServeHTTP(w http.ResponseWriter, r *http.R router.ServeHTTP(w, r) } -func (conf *hotRegionSchedulerConfig) handleGetConfig(w http.ResponseWriter, r *http.Request) { +func (conf *hotRegionSchedulerConfig) handleGetConfig(w http.ResponseWriter, _ *http.Request) { conf.RLock() defer conf.RUnlock() rd := render.New(render.Options{IndentJSON: true}) diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 8b1893887db..5f6cca892ee 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -43,11 +43,11 @@ func init() { // TODO: remove this global variable in the future. // And use a function to create hot schduler for test. schedulePeerPr = 1.0 - RegisterScheduler(utils.Write.String(), func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(utils.Write.String(), func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { cfg := initHotRegionScheduleConfig() return newHotWriteScheduler(opController, cfg), nil }) - RegisterScheduler(utils.Read.String(), func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(utils.Read.String(), func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { return newHotReadScheduler(opController, initHotRegionScheduleConfig()), nil }) } @@ -138,7 +138,7 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) { case movePeer: op, err = operator.CreateMovePeerOperator("move-peer-test", tc, region, operator.OpAdmin, 2, &metapb.Peer{Id: region.GetID()*10000 + 1, StoreId: 4}) case transferLeader: - op, err = operator.CreateTransferLeaderOperator("transfer-leader-test", tc, region, 1, 2, []uint64{}, operator.OpAdmin) + op, err = operator.CreateTransferLeaderOperator("transfer-leader-test", tc, region, 2, []uint64{}, operator.OpAdmin) } re.NoError(err) re.NotNil(op) diff --git a/pkg/schedule/schedulers/hot_region_v2.go b/pkg/schedule/schedulers/hot_region_v2.go index 40cb35cd16b..50016231cad 100644 --- a/pkg/schedule/schedulers/hot_region_v2.go +++ b/pkg/schedule/schedulers/hot_region_v2.go @@ -457,13 +457,13 @@ func (bs *balanceSolver) betterThanV2(old *solution) bool { if bs.cur.mainPeerStat != old.mainPeerStat { // We will firstly consider ensuring converge faster, secondly reduce oscillation if bs.resourceTy == writeLeader { - return bs.getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, + return getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, bs.cur.getPeersRateFromCache(bs.firstPriority), old.getPeersRateFromCache(bs.firstPriority)) > 0 } - firstCmp := bs.getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, + 
firstCmp := getRkCmpByPriorityV2(bs.firstPriority, bs.cur.firstScore, old.firstScore, bs.cur.getPeersRateFromCache(bs.firstPriority), old.getPeersRateFromCache(bs.firstPriority)) - secondCmp := bs.getRkCmpByPriorityV2(bs.secondPriority, bs.cur.secondScore, old.secondScore, + secondCmp := getRkCmpByPriorityV2(bs.secondPriority, bs.cur.secondScore, old.secondScore, bs.cur.getPeersRateFromCache(bs.secondPriority), old.getPeersRateFromCache(bs.secondPriority)) switch bs.cur.progressiveRank { case -4, -3, -2: // firstPriority @@ -482,7 +482,7 @@ func (bs *balanceSolver) betterThanV2(old *solution) bool { return false } -func (bs *balanceSolver) getRkCmpByPriorityV2(dim int, curScore, oldScore int, curPeersRate, oldPeersRate float64) int { +func getRkCmpByPriorityV2(dim int, curScore, oldScore int, curPeersRate, oldPeersRate float64) int { switch { case curScore > oldScore: return 1 diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go index e22037703cc..6bca686404d 100644 --- a/pkg/schedule/schedulers/init.go +++ b/pkg/schedule/schedulers/init.go @@ -52,7 +52,7 @@ func schedulersRegister() { } }) - RegisterScheduler(BalanceLeaderType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(BalanceLeaderType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceLeaderSchedulerConfig{storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -80,7 +80,7 @@ func schedulersRegister() { } }) - RegisterScheduler(BalanceRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(BalanceRegionType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceRegionSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -105,7 +105,7 @@ func schedulersRegister() { } }) - RegisterScheduler(BalanceWitnessType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(BalanceWitnessType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceWitnessSchedulerConfig{storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -152,13 +152,13 @@ func schedulersRegister() { }) // evict slow store - RegisterSliceDecoderBuilder(EvictSlowStoreType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(EvictSlowStoreType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(EvictSlowStoreType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(EvictSlowStoreType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initEvictSlowStoreSchedulerConfig(storage) if err := decoder(conf); err != nil { return nil, err @@ -198,7 +198,7 @@ func schedulersRegister() { } }) - 
RegisterScheduler(GrantHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(GrantHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &grantHotRegionSchedulerConfig{StoreIDs: make([]uint64, 0), storage: storage} conf.cluster = opController.GetCluster() if err := decoder(conf); err != nil { @@ -208,13 +208,13 @@ func schedulersRegister() { }) // hot region - RegisterSliceDecoderBuilder(HotRegionType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(HotRegionType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(HotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(HotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initHotRegionScheduleConfig() var data map[string]any if err := decoder(&data); err != nil { @@ -286,7 +286,7 @@ func schedulersRegister() { } }) - RegisterScheduler(LabelType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(LabelType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &labelSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -311,7 +311,7 @@ func schedulersRegister() { } }) - RegisterScheduler(RandomMergeType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(RandomMergeType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &randomMergeSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -340,7 +340,7 @@ func schedulersRegister() { } }) - RegisterScheduler(ScatterRangeType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(ScatterRangeType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &scatterRangeSchedulerConfig{ storage: storage, } @@ -374,7 +374,7 @@ func schedulersRegister() { } }) - RegisterScheduler(ShuffleHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(ShuffleHotRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &shuffleHotRegionSchedulerConfig{Limit: uint64(1)} if err := decoder(conf); err != nil { return nil, err @@ -400,7 +400,7 @@ func schedulersRegister() { } }) - RegisterScheduler(ShuffleLeaderType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb 
...func(string) error) (Scheduler, error) { + RegisterScheduler(ShuffleLeaderType, func(opController *operator.Controller, _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &shuffleLeaderSchedulerConfig{} if err := decoder(conf); err != nil { return nil, err @@ -425,7 +425,7 @@ func schedulersRegister() { } }) - RegisterScheduler(ShuffleRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(ShuffleRegionType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &shuffleRegionSchedulerConfig{storage: storage} if err := decoder(conf); err != nil { return nil, err @@ -434,13 +434,13 @@ func schedulersRegister() { }) // split bucket - RegisterSliceDecoderBuilder(SplitBucketType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(SplitBucketType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(SplitBucketType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(SplitBucketType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initSplitBucketConfig() if err := decoder(conf); err != nil { return nil, err @@ -450,24 +450,24 @@ func schedulersRegister() { }) // transfer witness leader - RegisterSliceDecoderBuilder(TransferWitnessLeaderType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(TransferWitnessLeaderType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(TransferWitnessLeaderType, func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(TransferWitnessLeaderType, func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { return newTransferWitnessLeaderScheduler(opController), nil }) // evict slow store by trend - RegisterSliceDecoderBuilder(EvictSlowTrendType, func(args []string) ConfigDecoder { - return func(v any) error { + RegisterSliceDecoderBuilder(EvictSlowTrendType, func([]string) ConfigDecoder { + return func(any) error { return nil } }) - RegisterScheduler(EvictSlowTrendType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(EvictSlowTrendType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := initEvictSlowTrendSchedulerConfig(storage) if err := decoder(conf); err != nil { return nil, err diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index 90310bcf10e..24875e3e26a 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -68,7 +68,7 @@ func (s *labelScheduler) GetName() string { return s.conf.Name } -func (s *labelScheduler) GetType() string { +func (*labelScheduler) GetType() string { return LabelType } @@ -84,7 +84,7 @@ func (s 
*labelScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { return allowed } -func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { labelCounter.Inc() stores := cluster.GetStores() rejectLeaderStores := make(map[uint64]struct{}) @@ -119,7 +119,7 @@ func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([ continue } - op, err := operator.CreateTransferLeaderOperator("label-reject-leader", cluster, region, id, target.GetID(), []uint64{}, operator.OpLeader) + op, err := operator.CreateTransferLeaderOperator("label-reject-leader", cluster, region, target.GetID(), []uint64{}, operator.OpLeader) if err != nil { log.Debug("fail to create transfer label reject leader operator", errs.ZapError(err)) return nil, nil diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index 44bb5081ef9..7fec0bd9530 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -70,7 +70,7 @@ func (s *randomMergeScheduler) GetName() string { return s.conf.Name } -func (s *randomMergeScheduler) GetType() string { +func (*randomMergeScheduler) GetType() string { return RandomMergeType } @@ -86,7 +86,7 @@ func (s *randomMergeScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) return allowed } -func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { randomMergeCounter.Inc() store := filter.NewCandidates(cluster.GetStores()). @@ -113,7 +113,7 @@ func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bo return nil, nil } - if !s.allowMerge(cluster, region, target) { + if !allowMerge(cluster, region, target) { randomMergeNotAllowedCounter.Inc() return nil, nil } @@ -129,7 +129,7 @@ func (s *randomMergeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bo return ops, nil } -func (s *randomMergeScheduler) allowMerge(cluster sche.SchedulerCluster, region, target *core.RegionInfo) bool { +func allowMerge(cluster sche.SchedulerCluster, region, target *core.RegionInfo) bool { if !filter.IsRegionHealthy(region) || !filter.IsRegionHealthy(target) { return false } diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index 9ad9e597dfd..8a2f0a5398b 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -156,7 +156,7 @@ func (l *scatterRangeScheduler) GetName() string { return l.name } -func (l *scatterRangeScheduler) GetType() string { +func (*scatterRangeScheduler) GetType() string { return ScatterRangeType } @@ -206,7 +206,7 @@ func (l *scatterRangeScheduler) allowBalanceRegion(cluster sche.SchedulerCluster return allowed } -func (l *scatterRangeScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (l *scatterRangeScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { scatterRangeCounter.Inc() // isolate a new cluster according to the key range c := genRangeCluster(cluster, l.config.GetStartKey(), l.config.GetEndKey()) @@ -282,7 +282,7 @@ func (handler *scatterRangeHandler) UpdateConfig(w http.ResponseWriter, r *http. 
handler.rd.JSON(w, http.StatusOK, nil) } -func (handler *scatterRangeHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *scatterRangeHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go index b74b72283ec..abace59a266 100644 --- a/pkg/schedule/schedulers/scheduler.go +++ b/pkg/schedule/schedulers/scheduler.go @@ -101,7 +101,7 @@ func ConfigJSONDecoder(data []byte) ConfigDecoder { func ConfigSliceDecoder(name string, args []string) ConfigDecoder { builder, ok := schedulerArgsToDecoder[name] if !ok { - return func(v any) error { + return func(any) error { return errors.Errorf("the config decoder do not register for %s", name) } } diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index a1448fbd041..726138e8f7a 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -114,7 +114,7 @@ func (s *shuffleHotRegionScheduler) GetName() string { return s.conf.Name } -func (s *shuffleHotRegionScheduler) GetType() string { +func (*shuffleHotRegionScheduler) GetType() string { return ShuffleHotRegionType } @@ -157,7 +157,7 @@ func (s *shuffleHotRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerClus return hotRegionAllowed && regionAllowed && leaderAllowed } -func (s *shuffleHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { shuffleHotRegionCounter.Inc() rw := s.randomRWType() s.prepareForBalance(rw, cluster) @@ -250,7 +250,7 @@ func (handler *shuffleHotRegionHandler) UpdateConfig(w http.ResponseWriter, r *h handler.rd.JSON(w, http.StatusOK, nil) } -func (handler *shuffleHotRegionHandler) ListConfig(w http.ResponseWriter, r *http.Request) { +func (handler *shuffleHotRegionHandler) ListConfig(w http.ResponseWriter, _ *http.Request) { conf := handler.config.Clone() handler.rd.JSON(w, http.StatusOK, conf) } diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index a6ff4baf65b..5b3dfd9fd20 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -71,7 +71,7 @@ func (s *shuffleLeaderScheduler) GetName() string { return s.conf.Name } -func (s *shuffleLeaderScheduler) GetType() string { +func (*shuffleLeaderScheduler) GetType() string { return ShuffleLeaderType } @@ -87,7 +87,7 @@ func (s *shuffleLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster return allowed } -func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { // We shuffle leaders between stores by: // 1. random select a valid store. // 2. transfer a leader to the store. 
@@ -106,7 +106,7 @@ func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun shuffleLeaderNoFollowerCounter.Inc() return nil, nil } - op, err := operator.CreateTransferLeaderOperator(ShuffleLeaderType, cluster, region, region.GetLeader().GetId(), targetStore.GetID(), []uint64{}, operator.OpAdmin) + op, err := operator.CreateTransferLeaderOperator(ShuffleLeaderType, cluster, region, targetStore.GetID(), []uint64{}, operator.OpAdmin) if err != nil { log.Debug("fail to create shuffle leader operator", errs.ZapError(err)) return nil, nil diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index f9bed18d3fa..b1a100384ae 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -68,11 +68,11 @@ func (s *shuffleRegionScheduler) ServeHTTP(w http.ResponseWriter, r *http.Reques s.conf.ServeHTTP(w, r) } -func (s *shuffleRegionScheduler) GetName() string { +func (*shuffleRegionScheduler) GetName() string { return ShuffleRegionName } -func (s *shuffleRegionScheduler) GetType() string { +func (*shuffleRegionScheduler) GetType() string { return ShuffleRegionType } @@ -107,7 +107,7 @@ func (s *shuffleRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster return allowed } -func (s *shuffleRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *shuffleRegionScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { shuffleRegionCounter.Inc() region, oldPeer := s.scheduleRemovePeer(cluster) if region == nil { diff --git a/pkg/schedule/schedulers/shuffle_region_config.go b/pkg/schedule/schedulers/shuffle_region_config.go index 552d7ea8bce..bce64f743b8 100644 --- a/pkg/schedule/schedulers/shuffle_region_config.go +++ b/pkg/schedule/schedulers/shuffle_region_config.go @@ -77,7 +77,7 @@ func (conf *shuffleRegionSchedulerConfig) ServeHTTP(w http.ResponseWriter, r *ht router.ServeHTTP(w, r) } -func (conf *shuffleRegionSchedulerConfig) handleGetRoles(w http.ResponseWriter, r *http.Request) { +func (conf *shuffleRegionSchedulerConfig) handleGetRoles(w http.ResponseWriter, _ *http.Request) { rd := render.New(render.Options{IndentJSON: true}) rd.JSON(w, http.StatusOK, conf.GetRoles()) } diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 7e276402e49..609510446c7 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -175,12 +175,12 @@ func newSplitBucketScheduler(opController *operator.Controller, conf *splitBucke } // GetName returns the name of the split bucket scheduler. -func (s *splitBucketScheduler) GetName() string { +func (*splitBucketScheduler) GetName() string { return SplitBucketName } // GetType returns the type of the split bucket scheduler. -func (s *splitBucketScheduler) GetType() string { +func (*splitBucketScheduler) GetType() string { return SplitBucketType } @@ -230,7 +230,7 @@ type splitBucketPlan struct { } // Schedule return operators if some bucket is too hot. 
-func (s *splitBucketScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *splitBucketScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { splitBucketScheduleCounter.Inc() conf := s.conf.Clone() plan := &splitBucketPlan{ diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index c651a8ef872..9ba78985d13 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -60,19 +60,19 @@ func newTransferWitnessLeaderScheduler(opController *operator.Controller) Schedu } } -func (s *transferWitnessLeaderScheduler) GetName() string { +func (*transferWitnessLeaderScheduler) GetName() string { return TransferWitnessLeaderName } -func (s *transferWitnessLeaderScheduler) GetType() string { +func (*transferWitnessLeaderScheduler) GetType() string { return TransferWitnessLeaderType } -func (s *transferWitnessLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster) bool { +func (*transferWitnessLeaderScheduler) IsScheduleAllowed(sche.SchedulerCluster) bool { return true } -func (s *transferWitnessLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { +func (s *transferWitnessLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) { transferWitnessLeaderCounter.Inc() return s.scheduleTransferWitnessLeaderBatch(s.GetName(), s.GetType(), cluster, transferWitnessLeaderBatchSize), nil } @@ -83,7 +83,7 @@ batchLoop: for i := 0; i < batchSize; i++ { select { case region := <-s.regions: - op, err := s.scheduleTransferWitnessLeader(name, typ, cluster, region) + op, err := scheduleTransferWitnessLeader(name, typ, cluster, region) if err != nil { log.Debug("fail to create transfer leader operator", errs.ZapError(err)) continue @@ -100,7 +100,7 @@ batchLoop: return ops } -func (s *transferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerCluster, region *core.RegionInfo) (*operator.Operator, error) { +func scheduleTransferWitnessLeader(name, typ string, cluster sche.SchedulerCluster, region *core.RegionInfo) (*operator.Operator, error) { var filters []filter.Filter unhealthyPeerStores := make(map[uint64]struct{}) for _, peer := range region.GetDownPeers() { @@ -123,7 +123,7 @@ func (s *transferWitnessLeaderScheduler) scheduleTransferWitnessLeader(name, typ for _, t := range targets { targetIDs = append(targetIDs, t.GetID()) } - return operator.CreateTransferLeaderOperator(typ, cluster, region, region.GetLeader().GetStoreId(), target.GetID(), targetIDs, operator.OpWitnessLeader) + return operator.CreateTransferLeaderOperator(typ, cluster, region, target.GetID(), targetIDs, operator.OpWitnessLeader) } // RecvRegionInfo receives a checked region from coordinator diff --git a/pkg/schedule/splitter/region_splitter.go b/pkg/schedule/splitter/region_splitter.go index f0da8442a2c..aeab4b70cf0 100644 --- a/pkg/schedule/splitter/region_splitter.go +++ b/pkg/schedule/splitter/region_splitter.go @@ -108,6 +108,7 @@ func (r *RegionSplitter) splitRegionsByKeys(parCtx context.Context, splitKeys [] ticker.Stop() cancel() }() +outerLoop: for { select { case <-ticker.C: @@ -118,7 +119,7 @@ func (r *RegionSplitter) splitRegionsByKeys(parCtx context.Context, splitKeys [] r.handler.ScanRegionsByKeyRange(groupKeys, results) } case <-ctx.Done(): - break + break 
outerLoop } finished := true for _, groupKeys := range validGroups { diff --git a/pkg/schedule/splitter/region_splitter_test.go b/pkg/schedule/splitter/region_splitter_test.go index ebb8b225a9b..2dbadf6701c 100644 --- a/pkg/schedule/splitter/region_splitter_test.go +++ b/pkg/schedule/splitter/region_splitter_test.go @@ -37,7 +37,7 @@ func newMockSplitRegionsHandler() *mockSplitRegionsHandler { } // SplitRegionByKeys mock SplitRegionsHandler -func (m *mockSplitRegionsHandler) SplitRegionByKeys(region *core.RegionInfo, splitKeys [][]byte) error { +func (m *mockSplitRegionsHandler) SplitRegionByKeys(region *core.RegionInfo, _ [][]byte) error { m.regions[region.GetID()] = [2][]byte{ region.GetStartKey(), region.GetEndKey(), diff --git a/pkg/statistics/buckets/hot_bucket_task.go b/pkg/statistics/buckets/hot_bucket_task.go index d6a43a6f8ae..ff7c30a7d81 100644 --- a/pkg/statistics/buckets/hot_bucket_task.go +++ b/pkg/statistics/buckets/hot_bucket_task.go @@ -55,7 +55,7 @@ func NewCheckPeerTask(buckets *metapb.Buckets) flowBucketsItemTask { } } -func (t *checkBucketsTask) taskType() flowItemTaskKind { +func (*checkBucketsTask) taskType() flowItemTaskKind { return checkBucketsTaskType } @@ -79,7 +79,7 @@ func NewCollectBucketStatsTask(minDegree int, regionIDs ...uint64) *collectBucke } } -func (t *collectBucketStatsTask) taskType() flowItemTaskKind { +func (*collectBucketStatsTask) taskType() flowItemTaskKind { return collectBucketStatsTaskType } diff --git a/pkg/statistics/collector.go b/pkg/statistics/collector.go index e64b673803d..88986b93d4b 100644 --- a/pkg/statistics/collector.go +++ b/pkg/statistics/collector.go @@ -36,11 +36,11 @@ func newTikvCollector() storeCollector { return tikvCollector{} } -func (c tikvCollector) Engine() string { +func (tikvCollector) Engine() string { return core.EngineTiKV } -func (c tikvCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { +func (tikvCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { if info.IsTiFlash() { return false } @@ -53,7 +53,7 @@ func (c tikvCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind return false } -func (c tikvCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { +func (tikvCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { loads = make([]float64, utils.DimLen) switch rwTy { case utils.Read: @@ -87,11 +87,11 @@ func newTiFlashCollector(isTraceRegionFlow bool) storeCollector { return tiflashCollector{isTraceRegionFlow: isTraceRegionFlow} } -func (c tiflashCollector) Engine() string { +func (tiflashCollector) Engine() string { return core.EngineTiFlash } -func (c tiflashCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { +func (tiflashCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool { switch kind { case constant.LeaderKind: return false diff --git a/pkg/statistics/hot_cache_task.go b/pkg/statistics/hot_cache_task.go index c84a292b4e7..fa224b522ff 100644 --- a/pkg/statistics/hot_cache_task.go +++ b/pkg/statistics/hot_cache_task.go @@ -146,7 +146,7 @@ func newCollectMetricsTask() *collectMetricsTask { return &collectMetricsTask{} } -func (t *collectMetricsTask) runTask(cache *hotPeerCache) { +func (*collectMetricsTask) runTask(cache *hotPeerCache) { cache.collectMetrics() } diff --git a/pkg/statistics/hot_peer_cache.go b/pkg/statistics/hot_peer_cache.go index 
0e35e0e23be..cd27dcad4c8 100644 --- a/pkg/statistics/hot_peer_cache.go +++ b/pkg/statistics/hot_peer_cache.go @@ -451,7 +451,7 @@ func (f *hotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldIt // For write stat, as the stat is send by region heartbeat, the first heartbeat will be skipped. // For read stat, as the stat is send by store heartbeat, the first heartbeat won't be skipped. if f.kind == utils.Write { - f.inheritItem(newItem, oldItem) + inheritItem(newItem, oldItem) return newItem } } else { @@ -465,25 +465,25 @@ func (f *hotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldIt isFull := newItem.rollingLoads[0].isFull(f.interval()) // The intervals of dims are the same, so it is only necessary to determine whether any of them if !isFull { // not update hot degree and anti count - f.inheritItem(newItem, oldItem) + inheritItem(newItem, oldItem) } else { // If item is inCold, it means the pd didn't recv this item in the store heartbeat, // thus we make it colder if newItem.inCold { - f.coldItem(newItem, oldItem) + coldItem(newItem, oldItem) } else { thresholds := f.calcHotThresholds(newItem.StoreID) if f.isOldColdPeer(oldItem, newItem.StoreID) { if newItem.isHot(thresholds) { - f.initItem(newItem) + initItem(newItem, f.kind.DefaultAntiCount()) } else { newItem.actionType = utils.Remove } } else { if newItem.isHot(thresholds) { - f.hotItem(newItem, oldItem) + hotItem(newItem, oldItem, f.kind.DefaultAntiCount()) } else { - f.coldItem(newItem, oldItem) + coldItem(newItem, oldItem) } } } @@ -496,7 +496,7 @@ func (f *hotPeerCache) updateNewHotPeerStat(newItem *HotPeerStat, deltaLoads []f regionStats := f.kind.RegionStats() // interval is not 0 which is guaranteed by the caller. if interval.Seconds() >= float64(f.kind.ReportInterval()) { - f.initItem(newItem) + initItem(newItem, f.kind.DefaultAntiCount()) } newItem.actionType = utils.Add newItem.rollingLoads = make([]*dimStat, len(regionStats)) @@ -556,7 +556,7 @@ func (f *hotPeerCache) removeAllItem() { } } -func (f *hotPeerCache) coldItem(newItem, oldItem *HotPeerStat) { +func coldItem(newItem, oldItem *HotPeerStat) { newItem.HotDegree = oldItem.HotDegree - 1 newItem.AntiCount = oldItem.AntiCount - 1 if newItem.AntiCount <= 0 { @@ -566,9 +566,9 @@ func (f *hotPeerCache) coldItem(newItem, oldItem *HotPeerStat) { } } -func (f *hotPeerCache) hotItem(newItem, oldItem *HotPeerStat) { +func hotItem(newItem, oldItem *HotPeerStat, defaultAntiCount int) { newItem.HotDegree = oldItem.HotDegree + 1 - if oldItem.AntiCount < f.kind.DefaultAntiCount() { + if oldItem.AntiCount < defaultAntiCount { newItem.AntiCount = oldItem.AntiCount + 1 } else { newItem.AntiCount = oldItem.AntiCount @@ -576,13 +576,13 @@ func (f *hotPeerCache) hotItem(newItem, oldItem *HotPeerStat) { newItem.allowInherited = true } -func (f *hotPeerCache) initItem(item *HotPeerStat) { +func initItem(item *HotPeerStat, defaultAntiCount int) { item.HotDegree = 1 - item.AntiCount = f.kind.DefaultAntiCount() + item.AntiCount = defaultAntiCount item.allowInherited = true } -func (f *hotPeerCache) inheritItem(newItem, oldItem *HotPeerStat) { +func inheritItem(newItem, oldItem *HotPeerStat) { newItem.HotDegree = oldItem.HotDegree newItem.AntiCount = oldItem.AntiCount } diff --git a/pkg/statistics/slow_stat.go b/pkg/statistics/slow_stat.go index 4079043d154..cc579b3d90b 100644 --- a/pkg/statistics/slow_stat.go +++ b/pkg/statistics/slow_stat.go @@ -15,8 +15,6 @@ package statistics import ( - "context" - "github.com/tikv/pd/pkg/utils/syncutil" ) @@ -26,7 +24,7 
@@ type SlowStat struct { } // NewSlowStat creates the container to hold slow nodes' statistics. -func NewSlowStat(ctx context.Context) *SlowStat { +func NewSlowStat() *SlowStat { return &SlowStat{ SlowStoresStats: NewSlowStoresStats(), } diff --git a/pkg/statistics/store_collection.go b/pkg/statistics/store_collection.go index aacd45338d1..4f76ffb0b5f 100644 --- a/pkg/statistics/store_collection.go +++ b/pkg/statistics/store_collection.go @@ -147,7 +147,7 @@ func (s *storeStatistics) Observe(store *core.StoreInfo) { } } -func (s *storeStatistics) ObserveHotStat(store *core.StoreInfo, stats *StoresStats) { +func ObserveHotStat(store *core.StoreInfo, stats *StoresStats) { // Store flows. storeAddress := store.GetAddress() id := strconv.FormatUint(store.GetID(), 10) @@ -309,10 +309,6 @@ func (m *storeStatisticsMap) Observe(store *core.StoreInfo) { m.stats.Observe(store) } -func (m *storeStatisticsMap) ObserveHotStat(store *core.StoreInfo, stats *StoresStats) { - m.stats.ObserveHotStat(store, stats) -} - func (m *storeStatisticsMap) Collect() { m.stats.Collect() } diff --git a/pkg/statistics/store_collection_test.go b/pkg/statistics/store_collection_test.go index 02e6350ffa4..64a02a54bb4 100644 --- a/pkg/statistics/store_collection_test.go +++ b/pkg/statistics/store_collection_test.go @@ -68,7 +68,7 @@ func TestStoreStatistics(t *testing.T) { storeStats := NewStoreStatisticsMap(opt) for _, store := range stores { storeStats.Observe(store) - storeStats.ObserveHotStat(store, storesStats) + ObserveHotStat(store, storesStats) } stats := storeStats.stats diff --git a/pkg/storage/endpoint/keyspace.go b/pkg/storage/endpoint/keyspace.go index 77c81b2c8d6..30540e49a2e 100644 --- a/pkg/storage/endpoint/keyspace.go +++ b/pkg/storage/endpoint/keyspace.go @@ -48,7 +48,7 @@ type KeyspaceStorage interface { var _ KeyspaceStorage = (*StorageEndpoint)(nil) // SaveKeyspaceMeta adds a save keyspace meta operation to target transaction. -func (se *StorageEndpoint) SaveKeyspaceMeta(txn kv.Txn, meta *keyspacepb.KeyspaceMeta) error { +func (*StorageEndpoint) SaveKeyspaceMeta(txn kv.Txn, meta *keyspacepb.KeyspaceMeta) error { metaPath := KeyspaceMetaPath(meta.GetId()) metaVal, err := proto.Marshal(meta) if err != nil { @@ -59,7 +59,7 @@ func (se *StorageEndpoint) SaveKeyspaceMeta(txn kv.Txn, meta *keyspacepb.Keyspac // LoadKeyspaceMeta load and return keyspace meta specified by id. // If keyspace does not exist or error occurs, returned meta will be nil. -func (se *StorageEndpoint) LoadKeyspaceMeta(txn kv.Txn, id uint32) (*keyspacepb.KeyspaceMeta, error) { +func (*StorageEndpoint) LoadKeyspaceMeta(txn kv.Txn, id uint32) (*keyspacepb.KeyspaceMeta, error) { metaPath := KeyspaceMetaPath(id) metaVal, err := txn.Load(metaPath) if err != nil || metaVal == "" { @@ -74,7 +74,7 @@ func (se *StorageEndpoint) LoadKeyspaceMeta(txn kv.Txn, id uint32) (*keyspacepb. } // SaveKeyspaceID saves keyspace ID to the path specified by keyspace name. -func (se *StorageEndpoint) SaveKeyspaceID(txn kv.Txn, id uint32, name string) error { +func (*StorageEndpoint) SaveKeyspaceID(txn kv.Txn, id uint32, name string) error { idPath := KeyspaceIDPath(name) idVal := strconv.FormatUint(uint64(id), SpaceIDBase) return txn.Save(idPath, idVal) @@ -83,7 +83,7 @@ func (se *StorageEndpoint) SaveKeyspaceID(txn kv.Txn, id uint32, name string) er // LoadKeyspaceID loads keyspace ID from the path specified by keyspace name. 
// An additional boolean is returned to indicate whether target id exists, // it returns false if target id not found, or if error occurred. -func (se *StorageEndpoint) LoadKeyspaceID(txn kv.Txn, name string) (bool, uint32, error) { +func (*StorageEndpoint) LoadKeyspaceID(txn kv.Txn, name string) (bool, uint32, error) { idPath := KeyspaceIDPath(name) idVal, err := txn.Load(idPath) // Failed to load the keyspaceID if loading operation errored, or if keyspace does not exist. @@ -99,7 +99,7 @@ func (se *StorageEndpoint) LoadKeyspaceID(txn kv.Txn, name string) (bool, uint32 // LoadRangeKeyspace loads keyspaces starting at startID. // limit specifies the limit of loaded keyspaces. -func (se *StorageEndpoint) LoadRangeKeyspace(txn kv.Txn, startID uint32, limit int) ([]*keyspacepb.KeyspaceMeta, error) { +func (*StorageEndpoint) LoadRangeKeyspace(txn kv.Txn, startID uint32, limit int) ([]*keyspacepb.KeyspaceMeta, error) { startKey := KeyspaceMetaPath(startID) endKey := clientv3.GetPrefixRangeEnd(KeyspaceMetaPrefix()) keys, values, err := txn.LoadRange(startKey, endKey, limit) diff --git a/pkg/storage/endpoint/meta.go b/pkg/storage/endpoint/meta.go index d83e2b386c8..33482da512f 100644 --- a/pkg/storage/endpoint/meta.go +++ b/pkg/storage/endpoint/meta.go @@ -236,7 +236,7 @@ func (se *StorageEndpoint) DeleteRegion(region *metapb.Region) error { } // Flush flushes the pending data to the underlying storage backend. -func (se *StorageEndpoint) Flush() error { return nil } +func (*StorageEndpoint) Flush() error { return nil } // Close closes the underlying storage backend. -func (se *StorageEndpoint) Close() error { return nil } +func (*StorageEndpoint) Close() error { return nil } diff --git a/pkg/storage/endpoint/rule.go b/pkg/storage/endpoint/rule.go index d0092e8e303..84ad6ee1352 100644 --- a/pkg/storage/endpoint/rule.go +++ b/pkg/storage/endpoint/rule.go @@ -44,12 +44,12 @@ type RuleStorage interface { var _ RuleStorage = (*StorageEndpoint)(nil) // SaveRule stores a rule cfg to the rulesPath. -func (se *StorageEndpoint) SaveRule(txn kv.Txn, ruleKey string, rule any) error { +func (*StorageEndpoint) SaveRule(txn kv.Txn, ruleKey string, rule any) error { return saveJSONInTxn(txn, ruleKeyPath(ruleKey), rule) } // DeleteRule removes a rule from storage. -func (se *StorageEndpoint) DeleteRule(txn kv.Txn, ruleKey string) error { +func (*StorageEndpoint) DeleteRule(txn kv.Txn, ruleKey string) error { return txn.Remove(ruleKeyPath(ruleKey)) } @@ -59,12 +59,12 @@ func (se *StorageEndpoint) LoadRuleGroups(f func(k, v string)) error { } // SaveRuleGroup stores a rule group config to storage. -func (se *StorageEndpoint) SaveRuleGroup(txn kv.Txn, groupID string, group any) error { +func (*StorageEndpoint) SaveRuleGroup(txn kv.Txn, groupID string, group any) error { return saveJSONInTxn(txn, ruleGroupIDPath(groupID), group) } // DeleteRuleGroup removes a rule group from storage. -func (se *StorageEndpoint) DeleteRuleGroup(txn kv.Txn, groupID string) error { +func (*StorageEndpoint) DeleteRuleGroup(txn kv.Txn, groupID string) error { return txn.Remove(ruleGroupIDPath(groupID)) } @@ -74,12 +74,12 @@ func (se *StorageEndpoint) LoadRegionRules(f func(k, v string)) error { } // SaveRegionRule saves a region rule to the storage. -func (se *StorageEndpoint) SaveRegionRule(txn kv.Txn, ruleKey string, rule any) error { +func (*StorageEndpoint) SaveRegionRule(txn kv.Txn, ruleKey string, rule any) error { return saveJSONInTxn(txn, regionLabelKeyPath(ruleKey), rule) } // DeleteRegionRule removes a region rule from storage. 
-func (se *StorageEndpoint) DeleteRegionRule(txn kv.Txn, ruleKey string) error { +func (*StorageEndpoint) DeleteRegionRule(txn kv.Txn, ruleKey string) error { return txn.Remove(regionLabelKeyPath(ruleKey)) } diff --git a/pkg/storage/endpoint/tso_keyspace_group.go b/pkg/storage/endpoint/tso_keyspace_group.go index 39a08afe937..ba322336feb 100644 --- a/pkg/storage/endpoint/tso_keyspace_group.go +++ b/pkg/storage/endpoint/tso_keyspace_group.go @@ -163,7 +163,7 @@ type KeyspaceGroupStorage interface { var _ KeyspaceGroupStorage = (*StorageEndpoint)(nil) // LoadKeyspaceGroup loads the keyspace group by ID. -func (se *StorageEndpoint) LoadKeyspaceGroup(txn kv.Txn, id uint32) (*KeyspaceGroup, error) { +func (*StorageEndpoint) LoadKeyspaceGroup(txn kv.Txn, id uint32) (*KeyspaceGroup, error) { value, err := txn.Load(KeyspaceGroupIDPath(id)) if err != nil || value == "" { return nil, err @@ -176,12 +176,12 @@ func (se *StorageEndpoint) LoadKeyspaceGroup(txn kv.Txn, id uint32) (*KeyspaceGr } // SaveKeyspaceGroup saves the keyspace group. -func (se *StorageEndpoint) SaveKeyspaceGroup(txn kv.Txn, kg *KeyspaceGroup) error { +func (*StorageEndpoint) SaveKeyspaceGroup(txn kv.Txn, kg *KeyspaceGroup) error { return saveJSONInTxn(txn, KeyspaceGroupIDPath(kg.ID), kg) } // DeleteKeyspaceGroup deletes the keyspace group. -func (se *StorageEndpoint) DeleteKeyspaceGroup(txn kv.Txn, id uint32) error { +func (*StorageEndpoint) DeleteKeyspaceGroup(txn kv.Txn, id uint32) error { return txn.Remove(KeyspaceGroupIDPath(id)) } diff --git a/pkg/syncer/client_test.go b/pkg/syncer/client_test.go index 84193ebaffe..e7be77d2bb0 100644 --- a/pkg/syncer/client_test.go +++ b/pkg/syncer/client_test.go @@ -91,7 +91,7 @@ func (s *mockServer) LoopContext() context.Context { return s.ctx } -func (s *mockServer) ClusterID() uint64 { +func (*mockServer) ClusterID() uint64 { return 1 } @@ -107,7 +107,7 @@ func (s *mockServer) GetStorage() storage.Storage { return s.storage } -func (s *mockServer) Name() string { +func (*mockServer) Name() string { return "mock-server" } @@ -115,7 +115,7 @@ func (s *mockServer) GetRegions() []*core.RegionInfo { return s.bc.GetRegions() } -func (s *mockServer) GetTLSConfig() *grpcutil.TLSConfig { +func (*mockServer) GetTLSConfig() *grpcutil.TLSConfig { return &grpcutil.TLSConfig{} } diff --git a/pkg/tso/global_allocator.go b/pkg/tso/global_allocator.go index a37bcc73881..f90dc5f26fe 100644 --- a/pkg/tso/global_allocator.go +++ b/pkg/tso/global_allocator.go @@ -187,7 +187,7 @@ func (gta *GlobalTSOAllocator) Initialize(int) error { gta.tsoAllocatorRoleGauge.Set(1) // The suffix of a Global TSO should always be 0. gta.timestampOracle.suffix = 0 - return gta.timestampOracle.SyncTimestamp(gta.member.GetLeadership()) + return gta.timestampOracle.SyncTimestamp() } // IsInitialize is used to indicates whether this allocator is initialized. @@ -197,7 +197,7 @@ func (gta *GlobalTSOAllocator) IsInitialize() bool { // UpdateTSO is used to update the TSO in memory and the time window in etcd. func (gta *GlobalTSOAllocator) UpdateTSO() error { - return gta.timestampOracle.UpdateTimestamp(gta.member.GetLeadership()) + return gta.timestampOracle.UpdateTimestamp() } // SetTSO sets the physical part with given TSO. 
diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index d259ab27a5b..d1e94d445cc 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -674,7 +674,7 @@ func (kgm *KeyspaceGroupManager) isAssignedToMe(group *endpoint.KeyspaceGroup) b // updateKeyspaceGroup applies the given keyspace group. If the keyspace group is just assigned to // this host/pod, it will join the primary election. func (kgm *KeyspaceGroupManager) updateKeyspaceGroup(group *endpoint.KeyspaceGroup) { - if err := kgm.checkKeySpaceGroupID(group.ID); err != nil { + if err := checkKeySpaceGroupID(group.ID); err != nil { log.Warn("keyspace group ID is invalid, ignore it", zap.Error(err)) return } @@ -751,7 +751,7 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroup(group *endpoint.KeyspaceGro kgm.groupUpdateRetryList[group.ID] = group return } - participant.SetCampaignChecker(func(leadership *election.Leadership) bool { + participant.SetCampaignChecker(func(*election.Leadership) bool { return splitSourceAM.GetMember().IsLeader() }) } @@ -997,7 +997,7 @@ func (kgm *KeyspaceGroupManager) exitElectionMembership(group *endpoint.Keyspace // GetAllocatorManager returns the AllocatorManager of the given keyspace group func (kgm *KeyspaceGroupManager) GetAllocatorManager(keyspaceGroupID uint32) (*AllocatorManager, error) { - if err := kgm.checkKeySpaceGroupID(keyspaceGroupID); err != nil { + if err := checkKeySpaceGroupID(keyspaceGroupID); err != nil { return nil, err } if am, _ := kgm.getKeyspaceGroupMeta(keyspaceGroupID); am != nil { @@ -1022,7 +1022,7 @@ func (kgm *KeyspaceGroupManager) FindGroupByKeyspaceID( func (kgm *KeyspaceGroupManager) GetElectionMember( keyspaceID, keyspaceGroupID uint32, ) (ElectionMember, error) { - if err := kgm.checkKeySpaceGroupID(keyspaceGroupID); err != nil { + if err := checkKeySpaceGroupID(keyspaceGroupID); err != nil { return nil, err } am, _, _, err := kgm.getKeyspaceGroupMetaWithCheck(keyspaceID, keyspaceGroupID) @@ -1052,7 +1052,7 @@ func (kgm *KeyspaceGroupManager) HandleTSORequest( keyspaceID, keyspaceGroupID uint32, dcLocation string, count uint32, ) (ts pdpb.Timestamp, curKeyspaceGroupID uint32, err error) { - if err := kgm.checkKeySpaceGroupID(keyspaceGroupID); err != nil { + if err := checkKeySpaceGroupID(keyspaceGroupID); err != nil { return pdpb.Timestamp{}, keyspaceGroupID, err } am, _, curKeyspaceGroupID, err := kgm.getKeyspaceGroupMetaWithCheck(keyspaceID, keyspaceGroupID) @@ -1086,7 +1086,7 @@ func (kgm *KeyspaceGroupManager) HandleTSORequest( return ts, curKeyspaceGroupID, err } -func (kgm *KeyspaceGroupManager) checkKeySpaceGroupID(id uint32) error { +func checkKeySpaceGroupID(id uint32) error { if id < mcsutils.MaxKeyspaceGroupCountInUse { return nil } diff --git a/pkg/tso/local_allocator.go b/pkg/tso/local_allocator.go index 45c200ca566..e9019bf2bf3 100644 --- a/pkg/tso/local_allocator.go +++ b/pkg/tso/local_allocator.go @@ -101,7 +101,7 @@ func (lta *LocalTSOAllocator) GetDCLocation() string { func (lta *LocalTSOAllocator) Initialize(suffix int) error { lta.tsoAllocatorRoleGauge.Set(1) lta.timestampOracle.suffix = suffix - return lta.timestampOracle.SyncTimestamp(lta.leadership) + return lta.timestampOracle.SyncTimestamp() } // IsInitialize is used to indicates whether this allocator is initialized. @@ -112,7 +112,7 @@ func (lta *LocalTSOAllocator) IsInitialize() bool { // UpdateTSO is used to update the TSO in memory and the time window in etcd // for all local TSO allocators this PD server hold. 
func (lta *LocalTSOAllocator) UpdateTSO() error { - return lta.timestampOracle.UpdateTimestamp(lta.leadership) + return lta.timestampOracle.UpdateTimestamp() } // SetTSO sets the physical part with given TSO. diff --git a/pkg/tso/tso.go b/pkg/tso/tso.go index 5ad786678c4..bcb3169e73c 100644 --- a/pkg/tso/tso.go +++ b/pkg/tso/tso.go @@ -156,7 +156,7 @@ func (t *timestampOracle) GetTimestampPath() string { } // SyncTimestamp is used to synchronize the timestamp. -func (t *timestampOracle) SyncTimestamp(leadership *election.Leadership) error { +func (t *timestampOracle) SyncTimestamp() error { log.Info("start to sync timestamp", logutil.CondUint32("keyspace-group-id", t.keyspaceGroupID, t.keyspaceGroupID > 0)) t.metrics.syncEvent.Inc() @@ -311,7 +311,7 @@ func (t *timestampOracle) resetUserTimestampInner(leadership *election.Leadershi // // NOTICE: this function should be called after the TSO in memory has been initialized // and should not be called when the TSO in memory has been reset anymore. -func (t *timestampOracle) UpdateTimestamp(leadership *election.Leadership) error { +func (t *timestampOracle) UpdateTimestamp() error { if !t.isInitialized() { return errs.ErrUpdateTimestamp.FastGenByArgs("timestamp in memory has not been initialized") } diff --git a/pkg/utils/apiutil/apiutil.go b/pkg/utils/apiutil/apiutil.go index c762245321e..d0745ada271 100644 --- a/pkg/utils/apiutil/apiutil.go +++ b/pkg/utils/apiutil/apiutil.go @@ -441,16 +441,15 @@ func (p *customReverseProxies) ServeHTTP(w http.ResponseWriter, r *http.Request) log.Error("request failed", errs.ZapError(errs.ErrSendRequest, err)) continue } - defer resp.Body.Close() var reader io.ReadCloser switch resp.Header.Get("Content-Encoding") { case "gzip": reader, err = gzip.NewReader(resp.Body) if err != nil { log.Error("failed to parse response with gzip compress", zap.Error(err)) + resp.Body.Close() continue } - defer reader.Close() default: reader = resp.Body } @@ -474,6 +473,8 @@ func (p *customReverseProxies) ServeHTTP(w http.ResponseWriter, r *http.Request) break } } + resp.Body.Close() + reader.Close() if err != nil { log.Error("write failed", errs.ZapError(errs.ErrWriteHTTPBody, err), zap.String("target-address", url.String())) // try next url. 
diff --git a/pkg/utils/etcdutil/etcdutil_test.go b/pkg/utils/etcdutil/etcdutil_test.go index e02615b695f..6ddeafe4573 100644 --- a/pkg/utils/etcdutil/etcdutil_test.go +++ b/pkg/utils/etcdutil/etcdutil_test.go @@ -438,7 +438,7 @@ func (suite *loopWatcherTestSuite) TestLoadNoExistedKey() { cache[string(kv.Key)] = struct{}{} return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, false, /* withPrefix */ ) @@ -466,7 +466,7 @@ func (suite *loopWatcherTestSuite) TestLoadWithLimitChange() { cache[string(kv.Key)] = struct{}{} return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, true, /* withPrefix */ ) @@ -559,7 +559,7 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLimit() { cache = append(cache, string(kv.Key)) return nil }, - func(kv *mvccpb.KeyValue) error { + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { @@ -598,7 +598,7 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLargeKey() { cache = append(cache, string(kv.Key)) return nil }, - func(kv *mvccpb.KeyValue) error { + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { @@ -641,7 +641,7 @@ func (suite *loopWatcherTestSuite) TestWatcherBreak() { } return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, false, /* withPrefix */ ) @@ -719,8 +719,8 @@ func (suite *loopWatcherTestSuite) TestWatcherRequestProgress() { "test", "TestWatcherChanBlock", func([]*clientv3.Event) error { return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, - func(kv *mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, + func(*mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, false, /* withPrefix */ ) diff --git a/pkg/utils/etcdutil/health_checker.go b/pkg/utils/etcdutil/health_checker.go index 51c1808de4a..44bddd8b183 100644 --- a/pkg/utils/etcdutil/health_checker.go +++ b/pkg/utils/etcdutil/health_checker.go @@ -146,7 +146,7 @@ func (checker *healthChecker) inspector(ctx context.Context) { } func (checker *healthChecker) close() { - checker.healthyClients.Range(func(key, value any) bool { + checker.healthyClients.Range(func(_, value any) bool { healthyCli := value.(*healthyClient) healthyCli.healthState.Set(0) healthyCli.Client.Close() @@ -382,7 +382,7 @@ func (checker *healthChecker) update() { } } // Clean up the stale clients which are not in the etcd cluster anymore. - checker.healthyClients.Range(func(key, value any) bool { + checker.healthyClients.Range(func(key, _ any) bool { ep := key.(string) if _, ok := epMap[ep]; !ok { log.Info("remove stale etcd client", diff --git a/pkg/utils/logutil/log.go b/pkg/utils/logutil/log.go index 8c0977818fa..ff6ffa7af9a 100644 --- a/pkg/utils/logutil/log.go +++ b/pkg/utils/logutil/log.go @@ -149,7 +149,7 @@ type stringer struct { } // String implement fmt.Stringer -func (s stringer) String() string { +func (stringer) String() string { return "?" 
} diff --git a/pkg/utils/metricutil/metricutil_test.go b/pkg/utils/metricutil/metricutil_test.go index acac9ce4d49..a5c183abc20 100644 --- a/pkg/utils/metricutil/metricutil_test.go +++ b/pkg/utils/metricutil/metricutil_test.go @@ -55,7 +55,7 @@ func TestCamelCaseToSnakeCase(t *testing.T) { } } -func TestCoverage(t *testing.T) { +func TestCoverage(_ *testing.T) { cfgs := []*MetricConfig{ { PushJob: "j1", diff --git a/pkg/utils/tempurl/check_env_dummy.go b/pkg/utils/tempurl/check_env_dummy.go index 85f527ea6fe..58d889bbfd6 100644 --- a/pkg/utils/tempurl/check_env_dummy.go +++ b/pkg/utils/tempurl/check_env_dummy.go @@ -16,6 +16,6 @@ package tempurl -func environmentCheck(addr string) bool { +func environmentCheck(_ string) bool { return true } diff --git a/pkg/utils/testutil/api_check.go b/pkg/utils/testutil/api_check.go index 5356d18514b..0b714204500 100644 --- a/pkg/utils/testutil/api_check.go +++ b/pkg/utils/testutil/api_check.go @@ -100,7 +100,8 @@ func ReadGetJSONWithBody(re *require.Assertions, client *http.Client, url string if err != nil { return err } - return checkResp(resp, StatusOK(re), ExtractJSON(re, data)) + checkOpts = append(checkOpts, StatusOK(re), ExtractJSON(re, data)) + return checkResp(resp, checkOpts...) } // CheckPostJSON is used to do post request and do check options. diff --git a/pkg/utils/tsoutil/tso_dispatcher.go b/pkg/utils/tsoutil/tso_dispatcher.go index 6d1ee2ace28..9dfb2515dc1 100644 --- a/pkg/utils/tsoutil/tso_dispatcher.go +++ b/pkg/utils/tsoutil/tso_dispatcher.go @@ -128,7 +128,7 @@ func (s *TSODispatcher) dispatch( case <-dispatcherCtx.Done(): return } - err = s.processRequests(forwardStream, requests[:pendingTSOReqCount], tsoProtoFactory) + err = s.processRequests(forwardStream, requests[:pendingTSOReqCount]) close(done) if err != nil { log.Error("proxy forward tso error", @@ -155,7 +155,7 @@ func (s *TSODispatcher) dispatch( } } -func (s *TSODispatcher) processRequests(forwardStream stream, requests []Request, tsoProtoFactory ProtoFactory) error { +func (s *TSODispatcher) processRequests(forwardStream stream, requests []Request) error { // Merge the requests count := uint32(0) for _, request := range requests { @@ -163,7 +163,7 @@ func (s *TSODispatcher) processRequests(forwardStream stream, requests []Request } start := time.Now() - resp, err := requests[0].process(forwardStream, count, tsoProtoFactory) + resp, err := requests[0].process(forwardStream, count) if err != nil { return err } @@ -184,7 +184,7 @@ func addLogical(logical, count int64, suffixBits uint32) int64 { return logical + count<<suffixBits diff --git a/tests/integrations/Makefile b/tests/integrations/Makefile @ gofmt -s -l -d . 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." @ golangci-lint run -c $(ROOT_PATH)/.golangci.yml --verbose ./... --allow-parallel-runners - @ echo "revive ..." - @ revive -formatter friendly -config $(ROOT_PATH)/revive.toml ./...
tidy: @ go mod tidy diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index da4be99638d..10be418c029 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -114,7 +114,7 @@ func TestClientLeaderChange(t *testing.T) { for i := range endpointsWithWrongURL { endpointsWithWrongURL[i] = "https://" + strings.TrimPrefix(endpointsWithWrongURL[i], "http://") } - cli := setupCli(re, ctx, endpointsWithWrongURL) + cli := setupCli(ctx, re, endpointsWithWrongURL) defer cli.Close() innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) @@ -175,7 +175,7 @@ func TestLeaderTransferAndMoveCluster(t *testing.T) { }() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() var lastTS uint64 @@ -287,7 +287,7 @@ func TestTSOAllocatorLeader(t *testing.T) { }) allocatorLeaderMap[dcLocation] = pdName } - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) @@ -321,9 +321,9 @@ func TestTSOFollowerProxy(t *testing.T) { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli1 := setupCli(re, ctx, endpoints) + cli1 := setupCli(ctx, re, endpoints) defer cli1.Close() - cli2 := setupCli(re, ctx, endpoints) + cli2 := setupCli(ctx, re, endpoints) defer cli2.Close() cli2.UpdateOption(pd.EnableTSOFollowerProxy, true) @@ -385,7 +385,7 @@ func TestUnavailableTimeAfterLeaderIsReady(t *testing.T) { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() var wg sync.WaitGroup @@ -417,7 +417,7 @@ func TestUnavailableTimeAfterLeaderIsReady(t *testing.T) { leader.Stop() re.NotEmpty(cluster.WaitLeader()) leaderReadyTime = time.Now() - cluster.RunServers([]*tests.TestServer{leader}) + tests.RunServers([]*tests.TestServer{leader}) }() wg.Wait() re.Less(maxUnavailableTime.UnixMilli(), leaderReadyTime.Add(1*time.Second).UnixMilli()) @@ -458,14 +458,14 @@ func TestGlobalAndLocalTSO(t *testing.T) { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() // Wait for all nodes becoming healthy. 
time.Sleep(time.Second * 5) // Join a new dc-location - pd4, err := cluster.Join(ctx, func(conf *config.Config, serverName string) { + pd4, err := cluster.Join(ctx, func(conf *config.Config, _ string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = "dc-4" }) @@ -586,7 +586,7 @@ func TestCustomTimeout(t *testing.T) { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints, pd.WithCustomTimeoutOption(time.Second)) + cli := setupCli(ctx, re, endpoints, pd.WithCustomTimeoutOption(time.Second)) defer cli.Close() start := time.Now() @@ -647,8 +647,7 @@ func (suite *followerForwardAndHandleTestSuite) SetupSuite() { }) } -func (suite *followerForwardAndHandleTestSuite) TearDownTest() { -} +func (*followerForwardAndHandleTestSuite) TearDownTest() {} func (suite *followerForwardAndHandleTestSuite) TearDownSuite() { suite.cluster.Destroy() @@ -660,7 +659,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionByFollowerForwardin ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + cli := setupCli(ctx, re, suite.endpoints, pd.WithForwardingOption(true)) defer cli.Close() re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork1", "return(true)")) time.Sleep(200 * time.Millisecond) @@ -680,7 +679,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTsoByFollowerForwarding1( re := suite.Require() ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + cli := setupCli(ctx, re, suite.endpoints, pd.WithForwardingOption(true)) defer cli.Close() re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork", "return(true)")) @@ -715,7 +714,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTsoByFollowerForwarding2( re := suite.Require() ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + cli := setupCli(ctx, re, suite.endpoints, pd.WithForwardingOption(true)) defer cli.Close() re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork", "return(true)")) @@ -752,7 +751,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTsoAndRegionByFollowerFor follower := cluster.GetServer(cluster.GetFollower()) re.NoError(failpoint.Enable("github.com/tikv/pd/client/grpcutil/unreachableNetwork2", fmt.Sprintf("return(\"%s\")", follower.GetAddr()))) - cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + cli := setupCli(ctx, re, suite.endpoints, pd.WithForwardingOption(true)) defer cli.Close() var lastTS uint64 testutil.Eventually(re, func() bool { @@ -821,7 +820,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromLeaderWhenNetwo follower := cluster.GetServer(cluster.GetFollower()) re.NoError(failpoint.Enable("github.com/tikv/pd/client/grpcutil/unreachableNetwork2", fmt.Sprintf("return(\"%s\")", follower.GetAddr()))) - cli := setupCli(re, ctx, suite.endpoints) + cli := setupCli(ctx, re, suite.endpoints) defer cli.Close() cluster.GetLeaderServer().GetServer().GetMember().ResignEtcdLeader(ctx, leader.GetServer().Name(), follower.GetServer().Name()) @@ -854,7 +853,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromFollower() { defer cancel() cluster := suite.cluster - cli := setupCli(re, ctx, suite.endpoints) + cli := setupCli(ctx, re, suite.endpoints) defer cli.Close() 
cli.UpdateOption(pd.EnableFollowerHandle, true) re.NotEmpty(cluster.WaitLeader()) @@ -949,7 +948,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTSFuture() { re.NoError(failpoint.Enable("github.com/tikv/pd/client/shortDispatcherChannel", "return(true)")) - cli := setupCli(re, ctx, suite.endpoints) + cli := setupCli(ctx, re, suite.endpoints) ctxs := make([]context.Context, 20) cancels := make([]context.CancelFunc, 20) @@ -1015,7 +1014,7 @@ func runServer(re *require.Assertions, cluster *tests.TestCluster) []string { return endpoints } -func setupCli(re *require.Assertions, ctx context.Context, endpoints []string, opts ...pd.ClientOption) pd.Client { +func setupCli(ctx context.Context, re *require.Assertions, endpoints []string, opts ...pd.ClientOption) pd.Client { cli, err := pd.NewClientWithContext(ctx, endpoints, pd.SecurityOption{}, opts...) re.NoError(err) return cli @@ -1083,7 +1082,7 @@ func TestCloseClient(t *testing.T) { re.NoError(err) defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) ts := cli.GetTSAsync(context.TODO()) time.Sleep(time.Second) cli.Close() @@ -1171,10 +1170,10 @@ func (suite *clientTestSuite) SetupSuite() { suite.grpcSvr = &server.GrpcServer{Server: suite.srv} server.MustWaitLeader(re, []*server.Server{suite.srv}) - suite.bootstrapServer(re, newHeader(suite.srv), suite.grpcPDClient) + bootstrapServer(re, newHeader(suite.srv), suite.grpcPDClient) suite.ctx, suite.clean = context.WithCancel(context.Background()) - suite.client = setupCli(re, suite.ctx, suite.srv.GetEndpoints()) + suite.client = setupCli(suite.ctx, re, suite.srv.GetEndpoints()) suite.regionHeartbeat, err = suite.grpcPDClient.RegionHeartbeat(suite.ctx) re.NoError(err) @@ -1216,7 +1215,7 @@ func newHeader(srv *server.Server) *pdpb.RequestHeader { } } -func (suite *clientTestSuite) bootstrapServer(re *require.Assertions, header *pdpb.RequestHeader, client pdpb.PDClient) { +func bootstrapServer(re *require.Assertions, header *pdpb.RequestHeader, client pdpb.PDClient) { regionID := regionIDAllocator.alloc() region := &metapb.Region{ Id: regionID, @@ -1781,7 +1780,7 @@ func TestWatch(t *testing.T) { re.NoError(err) defer cluster.Destroy() endpoints := runServer(re, cluster) - client := setupCli(re, ctx, endpoints) + client := setupCli(ctx, re, endpoints) defer client.Close() key := "test" @@ -1824,7 +1823,7 @@ func TestPutGet(t *testing.T) { re.NoError(err) defer cluster.Destroy() endpoints := runServer(re, cluster) - client := setupCli(re, ctx, endpoints) + client := setupCli(ctx, re, endpoints) defer client.Close() key := []byte("test") @@ -1859,7 +1858,7 @@ func TestClientWatchWithRevision(t *testing.T) { re.NoError(err) defer cluster.Destroy() endpoints := runServer(re, cluster) - client := setupCli(re, ctx, endpoints) + client := setupCli(ctx, re, endpoints) defer client.Close() s := cluster.GetLeaderServer() watchPrefix := "watch_test" @@ -1927,7 +1926,7 @@ func (suite *clientTestSuite) TestMemberUpdateBackOff() { defer cluster.Destroy() endpoints := runServer(re, cluster) - cli := setupCli(re, ctx, endpoints) + cli := setupCli(ctx, re, endpoints) defer cli.Close() innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) diff --git a/tests/integrations/client/client_tls_test.go b/tests/integrations/client/client_tls_test.go index bdfe050bf45..a5f0f5b200d 100644 --- a/tests/integrations/client/client_tls_test.go +++ b/tests/integrations/client/client_tls_test.go @@ -120,18 
+120,18 @@ func TestTLSReloadAtomicReplace(t *testing.T) { err = os.Rename(certsDirExp, certsDir) re.NoError(err) } - testTLSReload(re, ctx, cloneFunc, replaceFunc, revertFunc) + testTLSReload(ctx, re, cloneFunc, replaceFunc, revertFunc) } func testTLSReload( - re *require.Assertions, ctx context.Context, + re *require.Assertions, cloneFunc func() transport.TLSInfo, replaceFunc func(), revertFunc func()) { tlsInfo := cloneFunc() // 1. start cluster with valid certs - clus, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + clus, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Security.TLSConfig = grpcutil.TLSConfig{ KeyPath: tlsInfo.KeyFile, CertPath: tlsInfo.CertFile, diff --git a/tests/integrations/client/gc_client_test.go b/tests/integrations/client/gc_client_test.go index 737fd09a08f..9232b134f20 100644 --- a/tests/integrations/client/gc_client_test.go +++ b/tests/integrations/client/gc_client_test.go @@ -135,6 +135,7 @@ func (suite *gcClientTestSuite) TestClientWatchWithRevision() { suite.testClientWatchWithRevision(true) } +// nolint func (suite *gcClientTestSuite) testClientWatchWithRevision(fromNewRevision bool) { re := suite.Require() testKeyspaceID := uint32(100) diff --git a/tests/integrations/client/global_config_test.go b/tests/integrations/client/global_config_test.go index c52a35159b0..d813ec99676 100644 --- a/tests/integrations/client/global_config_test.go +++ b/tests/integrations/client/global_config_test.go @@ -89,7 +89,7 @@ func (suite *globalConfigTestSuite) TearDownSuite() { suite.client.Close() } -func (suite *globalConfigTestSuite) GetEtcdPath(configPath string) string { +func getEtcdPath(configPath string) string { return globalConfigPath + configPath } @@ -97,10 +97,10 @@ func (suite *globalConfigTestSuite) TestLoadWithoutNames() { re := suite.Require() defer func() { // clean up - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("test")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("test")) re.NoError(err) }() - r, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath("test"), "test") + r, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath("test"), "test") re.NoError(err) res, err := suite.server.LoadGlobalConfig(suite.server.Context(), &pdpb.LoadGlobalConfigRequest{ ConfigPath: globalConfigPath, @@ -115,10 +115,10 @@ func (suite *globalConfigTestSuite) TestLoadWithoutConfigPath() { re := suite.Require() defer func() { // clean up - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("source_id")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("source_id")) re.NoError(err) }() - _, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath("source_id"), "1") + _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath("source_id"), "1") re.NoError(err) res, err := suite.server.LoadGlobalConfig(suite.server.Context(), &pdpb.LoadGlobalConfigRequest{ Names: []string{"source_id"}, @@ -132,7 +132,7 @@ func (suite *globalConfigTestSuite) TestLoadOtherConfigPath() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() @@ -155,7 +155,7 @@ func (suite 
*globalConfigTestSuite) TestLoadAndStore() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("test")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("test")) re.NoError(err) } }() @@ -171,7 +171,7 @@ func (suite *globalConfigTestSuite) TestLoadAndStore() { re.Len(res.Items, 3) re.NoError(err) for i, item := range res.Items { - re.Equal(&pdpb.GlobalConfigItem{Kind: pdpb.EventType_PUT, Name: suite.GetEtcdPath(strconv.Itoa(i)), Payload: []byte(strconv.Itoa(i))}, item) + re.Equal(&pdpb.GlobalConfigItem{Kind: pdpb.EventType_PUT, Name: getEtcdPath(strconv.Itoa(i)), Payload: []byte(strconv.Itoa(i))}, item) } } @@ -179,7 +179,7 @@ func (suite *globalConfigTestSuite) TestStore() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("test")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("test")) re.NoError(err) } }() @@ -190,9 +190,9 @@ func (suite *globalConfigTestSuite) TestStore() { }) re.NoError(err) for i := 0; i < 3; i++ { - res, err := suite.server.GetClient().Get(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + res, err := suite.server.GetClient().Get(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) - re.Equal(suite.GetEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) + re.Equal(getEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) } } @@ -201,7 +201,7 @@ func (suite *globalConfigTestSuite) TestWatch() { defer func() { for i := 0; i < 3; i++ { // clean up - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() @@ -213,11 +213,11 @@ func (suite *globalConfigTestSuite) TestWatch() { Revision: 0, }, server) for i := 0; i < 6; i++ { - _, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) + _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } for i := 3; i < 6; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } res, err := suite.server.LoadGlobalConfig(suite.server.Context(), &pdpb.LoadGlobalConfigRequest{ @@ -231,29 +231,29 @@ func (suite *globalConfigTestSuite) TestClientLoadWithoutNames() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) + _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } res, _, err := suite.client.LoadGlobalConfig(suite.server.Context(), nil, globalConfigPath) re.NoError(err) re.Len(res, 3) for i, item := range res { - re.Equal(pd.GlobalConfigItem{EventType: pdpb.EventType_PUT, Name: suite.GetEtcdPath(strconv.Itoa(i)), PayLoad: 
[]byte(strconv.Itoa(i)), Value: strconv.Itoa(i)}, item) + re.Equal(pd.GlobalConfigItem{EventType: pdpb.EventType_PUT, Name: getEtcdPath(strconv.Itoa(i)), PayLoad: []byte(strconv.Itoa(i)), Value: strconv.Itoa(i)}, item) } } func (suite *globalConfigTestSuite) TestClientLoadWithoutConfigPath() { re := suite.Require() defer func() { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("source_id")) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("source_id")) re.NoError(err) }() - _, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath("source_id"), "1") + _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath("source_id"), "1") re.NoError(err) res, _, err := suite.client.LoadGlobalConfig(suite.server.Context(), []string{"source_id"}, "") re.NoError(err) @@ -265,7 +265,7 @@ func (suite *globalConfigTestSuite) TestClientLoadOtherConfigPath() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() @@ -285,7 +285,7 @@ func (suite *globalConfigTestSuite) TestClientStore() { re := suite.Require() defer func() { for i := 0; i < 3; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() @@ -293,9 +293,9 @@ func (suite *globalConfigTestSuite) TestClientStore() { []pd.GlobalConfigItem{{Name: "0", Value: "0"}, {Name: "1", Value: "1"}, {Name: "2", Value: "2"}}) re.NoError(err) for i := 0; i < 3; i++ { - res, err := suite.server.GetClient().Get(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + res, err := suite.server.GetClient().Get(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) - re.Equal(suite.GetEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) + re.Equal(getEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) } } @@ -303,25 +303,25 @@ func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { re := suite.Require() ctx := suite.server.Context() defer func() { - _, err := suite.server.GetClient().Delete(ctx, suite.GetEtcdPath("test")) + _, err := suite.server.GetClient().Delete(ctx, getEtcdPath("test")) re.NoError(err) for i := 3; i < 9; i++ { - _, err := suite.server.GetClient().Delete(ctx, suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(ctx, getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() // Mock get revision by loading - r, err := suite.server.GetClient().Put(ctx, suite.GetEtcdPath("test"), "test") + r, err := suite.server.GetClient().Put(ctx, getEtcdPath("test"), "test") re.NoError(err) res, revision, err := suite.client.LoadGlobalConfig(ctx, nil, globalConfigPath) re.NoError(err) re.Len(res, 1) suite.LessOrEqual(r.Header.GetRevision(), revision) - re.Equal(pd.GlobalConfigItem{EventType: pdpb.EventType_PUT, Name: suite.GetEtcdPath("test"), PayLoad: []byte("test"), Value: "test"}, res[0]) + re.Equal(pd.GlobalConfigItem{EventType: pdpb.EventType_PUT, Name: getEtcdPath("test"), PayLoad: []byte("test"), Value: "test"}, res[0]) // Mock when start watcher there are existed some keys, will load firstly for i := 0; i < 6; i++ { - _, err = suite.server.GetClient().Put(suite.server.Context(), 
suite.GetEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) + _, err = suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } // Start watcher at next revision @@ -329,12 +329,12 @@ func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { re.NoError(err) // Mock delete for i := 0; i < 3; i++ { - _, err = suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err = suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } // Mock put for i := 6; i < 9; i++ { - _, err = suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) + _, err = suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } timer := time.NewTimer(time.Second) @@ -347,7 +347,7 @@ func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { return case res := <-configChan: for _, r := range res { - re.Equal(suite.GetEtcdPath(r.Value), r.Name) + re.Equal(getEtcdPath(r.Value), r.Name) } runTest = true } diff --git a/tests/integrations/client/http_client_test.go b/tests/integrations/client/http_client_test.go index 9efbc587847..d35b7f00584 100644 --- a/tests/integrations/client/http_client_test.go +++ b/tests/integrations/client/http_client_test.go @@ -121,7 +121,7 @@ func (suite *httpClientTestSuite) TearDownSuite() { // RunTestInTwoModes is to run test in two modes. func (suite *httpClientTestSuite) RunTestInTwoModes(test func(mode mode, client pd.Client)) { // Run test with specific service discovery. - cli := setupCli(suite.Require(), suite.env[specificServiceDiscovery].ctx, suite.env[specificServiceDiscovery].endpoints) + cli := setupCli(suite.env[specificServiceDiscovery].ctx, suite.Require(), suite.env[specificServiceDiscovery].endpoints) sd := cli.GetServiceDiscovery() client := pd.NewClientWithServiceDiscovery("pd-http-client-it-grpc", sd) test(specificServiceDiscovery, client) @@ -268,7 +268,7 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { re.NoError(err) re.Equal(bundles[0], bundle) // Check if we have the default rule. - suite.checkRuleResult(re, env, client, &pd.Rule{ + checkRuleResult(re, env, client, &pd.Rule{ GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: pd.Voter, @@ -277,7 +277,7 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { EndKey: []byte{}, }, 1, true) // Should be the same as the rules in the bundle. 
- suite.checkRuleResult(re, env, client, bundle.Rules[0], 1, true) + checkRuleResult(re, env, client, bundle.Rules[0], 1, true) testRule := &pd.Rule{ GroupID: placement.DefaultGroupID, ID: "test", @@ -288,24 +288,24 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { } err = client.SetPlacementRule(env.ctx, testRule) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 2, true) + checkRuleResult(re, env, client, testRule, 2, true) err = client.DeletePlacementRule(env.ctx, placement.DefaultGroupID, "test") re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 1, false) + checkRuleResult(re, env, client, testRule, 1, false) testRuleOp := &pd.RuleOp{ Rule: testRule, Action: pd.RuleOpAdd, } err = client.SetPlacementRuleInBatch(env.ctx, []*pd.RuleOp{testRuleOp}) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 2, true) + checkRuleResult(re, env, client, testRule, 2, true) testRuleOp = &pd.RuleOp{ Rule: testRule, Action: pd.RuleOpDel, } err = client.SetPlacementRuleInBatch(env.ctx, []*pd.RuleOp{testRuleOp}) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 1, false) + checkRuleResult(re, env, client, testRule, 1, false) err = client.SetPlacementRuleBundles(env.ctx, []*pd.GroupBundle{ { ID: placement.DefaultGroupID, @@ -313,7 +313,7 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { }, }, true) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 1, true) + checkRuleResult(re, env, client, testRule, 1, true) ruleGroups, err := client.GetAllPlacementRuleGroups(env.ctx) re.NoError(err) re.Len(ruleGroups, 1) @@ -347,10 +347,10 @@ func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { } err = client.SetPlacementRule(env.ctx, testRule) re.NoError(err) - suite.checkRuleResult(re, env, client, testRule, 1, true) + checkRuleResult(re, env, client, testRule, 1, true) } -func (suite *httpClientTestSuite) checkRuleResult( +func checkRuleResult( re *require.Assertions, env *httpClientTestEnv, client pd.Client, @@ -724,7 +724,7 @@ func (suite *httpClientTestSuite) TestRedirectWithMetrics() { re := suite.Require() env := suite.env[defaultServiceDiscovery] - cli := setupCli(suite.Require(), env.ctx, env.endpoints) + cli := setupCli(env.ctx, suite.Require(), env.endpoints) defer cli.Close() sd := cli.GetServiceDiscovery() diff --git a/tests/integrations/mcs/discovery/register_test.go b/tests/integrations/mcs/discovery/register_test.go index 1b61a264232..e8f574ff8de 100644 --- a/tests/integrations/mcs/discovery/register_test.go +++ b/tests/integrations/mcs/discovery/register_test.go @@ -126,7 +126,7 @@ func (suite *serverRegisterTestSuite) checkServerPrimaryChange(serviceName strin serverMap := make(map[string]bs.Server) for i := 0; i < serverNum; i++ { s, cleanup := suite.addServer(serviceName) - defer cleanup() + defer cleanup() // nolint serverMap[s.GetAddr()] = s } diff --git a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go index ccec0a7cdc0..eec0909df61 100644 --- a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go +++ b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go @@ -91,7 +91,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() { nodes := make(map[string]bs.Server) for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount+1; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) - defer cleanup() + defer 
cleanup() // nolint nodes[s.GetAddr()] = s } tests.WaitForPrimaryServing(re, nodes) @@ -141,7 +141,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocReplica() { nodes := make(map[string]bs.Server) for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) - defer cleanup() + defer cleanup() // nolint nodes[s.GetAddr()] = s } tests.WaitForPrimaryServing(re, nodes) @@ -235,7 +235,7 @@ func (suite *keyspaceGroupTestSuite) TestSetNodes() { nodesList := []string{} for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) - defer cleanup() + defer cleanup() // nolint nodes[s.GetAddr()] = s nodesList = append(nodesList, s.GetAddr()) } @@ -296,7 +296,7 @@ func (suite *keyspaceGroupTestSuite) TestDefaultKeyspaceGroup() { nodes := make(map[string]bs.Server) for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) - defer cleanup() + defer cleanup() // nolint nodes[s.GetAddr()] = s } tests.WaitForPrimaryServing(re, nodes) diff --git a/tests/integrations/mcs/resourcemanager/resource_manager_test.go b/tests/integrations/mcs/resourcemanager/resource_manager_test.go index f1ea8736fda..f8cf92dddac 100644 --- a/tests/integrations/mcs/resourcemanager/resource_manager_test.go +++ b/tests/integrations/mcs/resourcemanager/resource_manager_test.go @@ -78,7 +78,7 @@ func (suite *resourceManagerClientTestSuite) SetupSuite() { suite.client, err = pd.NewClientWithContext(suite.ctx, suite.cluster.GetConfig().GetClientURLs(), pd.SecurityOption{}) re.NoError(err) leader := suite.cluster.GetServer(suite.cluster.WaitLeader()) - suite.waitLeader(re, suite.client, leader.GetAddr()) + waitLeader(re, suite.client, leader.GetAddr()) suite.initGroups = []*rmpb.ResourceGroup{ { @@ -135,7 +135,7 @@ func (suite *resourceManagerClientTestSuite) SetupSuite() { } } -func (suite *resourceManagerClientTestSuite) waitLeader(re *require.Assertions, cli pd.Client, leaderAddr string) { +func waitLeader(re *require.Assertions, cli pd.Client, leaderAddr string) { innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) re.NotNil(innerCli) @@ -177,7 +177,7 @@ func (suite *resourceManagerClientTestSuite) resignAndWaitLeader(re *require.Ass re.NoError(suite.cluster.ResignLeader()) newLeader := suite.cluster.GetServer(suite.cluster.WaitLeader()) re.NotNil(newLeader) - suite.waitLeader(re, suite.client, newLeader.GetAddr()) + waitLeader(re, suite.client, newLeader.GetAddr()) } func (suite *resourceManagerClientTestSuite) TestWatchResourceGroup() { @@ -349,7 +349,7 @@ type tokenConsumptionPerSecond struct { waitDuration time.Duration } -func (t tokenConsumptionPerSecond) makeReadRequest() *controller.TestRequestInfo { +func (tokenConsumptionPerSecond) makeReadRequest() *controller.TestRequestInfo { return controller.NewTestRequestInfo(false, 0, 0) } @@ -365,7 +365,7 @@ func (t tokenConsumptionPerSecond) makeReadResponse() *controller.TestResponseIn ) } -func (t tokenConsumptionPerSecond) makeWriteResponse() *controller.TestResponseInfo { +func (tokenConsumptionPerSecond) makeWriteResponse() *controller.TestResponseInfo { return controller.NewTestResponseInfo( 0, time.Duration(0), @@ -960,7 +960,7 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() { re.NoError(err) resp, err := 
resp, err := http.Post(getAddr(i)+"/resource-manager/api/v1/config/group", "application/json", strings.NewReader(string(createJSON)))
re.NoError(err)
- defer resp.Body.Close()
+ resp.Body.Close()
re.Equal(http.StatusOK, resp.StatusCode)
if tcase.isNewGroup {
finalNum++
@@ -975,7 +975,7 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() {
req.Header.Set("Content-Type", "application/json")
resp, err = http.DefaultClient.Do(req)
re.NoError(err)
- defer resp.Body.Close()
+ resp.Body.Close()
if tcase.modifySuccess {
re.Equal(http.StatusOK, resp.StatusCode)
} else {
@@ -985,9 +985,9 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() {
// Get Resource Group
resp, err = http.Get(getAddr(i) + "/resource-manager/api/v1/config/group/" + tcase.name)
re.NoError(err)
- defer resp.Body.Close()
re.Equal(http.StatusOK, resp.StatusCode)
respString, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
re.NoError(err)
re.Contains(string(respString), tcase.name)
if tcase.modifySuccess {
@@ -998,9 +998,9 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() {
if i == len(testCasesSet1)-1 {
resp, err := http.Get(getAddr(i) + "/resource-manager/api/v1/config/groups")
re.NoError(err)
- defer resp.Body.Close()
re.Equal(http.StatusOK, resp.StatusCode)
respString, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
re.NoError(err)
groups := make([]*server.ResourceGroup, 0)
json.Unmarshal(respString, &groups)
@@ -1012,8 +1012,8 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() {
re.NoError(err)
resp, err := http.DefaultClient.Do(req)
re.NoError(err)
- defer resp.Body.Close()
respString, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
re.NoError(err)
if g.Name == "default" {
re.Contains(string(respString), "cannot delete reserved group")
@@ -1026,9 +1026,9 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() {
// verify again
resp1, err := http.Get(getAddr(i) + "/resource-manager/api/v1/config/groups")
re.NoError(err)
- defer resp1.Body.Close()
re.Equal(http.StatusOK, resp1.StatusCode)
respString1, err := io.ReadAll(resp1.Body)
+ resp1.Body.Close()
re.NoError(err)
groups1 := make([]server.ResourceGroup, 0)
json.Unmarshal(respString1, &groups1)
@@ -1046,7 +1046,7 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() {
for _, s := range servers {
serverList = append(serverList, s)
}
- re.NoError(suite.cluster.RunServers(serverList))
+ re.NoError(tests.RunServers(serverList))
suite.cluster.WaitLeader()
// re-connect client as well
suite.client, err = pd.NewClientWithContext(suite.ctx, suite.cluster.GetConfig().GetClientURLs(), pd.SecurityOption{})
@@ -1314,9 +1314,8 @@ func (suite *resourceManagerClientTestSuite) TestCheckBackgroundJobs() {
enableBackgroundGroup := func(enable bool) string {
if enable {
return "background_enable"
- } else {
- return "background_unable"
}
+ return "background_unable"
}
// Mock add resource group.
group := &rmpb.ResourceGroup{
diff --git a/tests/integrations/mcs/tso/api_test.go b/tests/integrations/mcs/tso/api_test.go
index 32725418462..dc9bfa1e291 100644
--- a/tests/integrations/mcs/tso/api_test.go
+++ b/tests/integrations/mcs/tso/api_test.go
@@ -141,7 +141,7 @@ func TestTSOServerStartFirst(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- apiCluster, err := tests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) {
+ apiCluster, err := tests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) {
conf.Keyspace.PreAlloc = []string{"k1", "k2"}
})
defer apiCluster.Destroy()
diff --git a/tests/integrations/mcs/tso/proxy_test.go b/tests/integrations/mcs/tso/proxy_test.go
index 7ed329610f2..43877f262e2 100644
--- a/tests/integrations/mcs/tso/proxy_test.go
+++ b/tests/integrations/mcs/tso/proxy_test.go
@@ -84,7 +84,7 @@ func (s *tsoProxyTestSuite) SetupSuite() {
}
func (s *tsoProxyTestSuite) TearDownSuite() {
- s.cleanupGRPCStreams(s.cleanupFuncs)
+ cleanupGRPCStreams(s.cleanupFuncs)
s.tsoCluster.Destroy()
s.apiCluster.Destroy()
s.cancel()
@@ -112,7 +112,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyWorksWithCancellation() {
for j := 0; j < 10; j++ {
s.verifyTSOProxy(s.ctx, streams, cleanupFuncs, 10, true)
}
- s.cleanupGRPCStreams(cleanupFuncs)
+ cleanupGRPCStreams(cleanupFuncs)
}
}()
for i := 0; i < 10; i++ {
@@ -125,7 +125,7 @@
// TestTSOProxyStress tests the TSO Proxy can work correctly under the stress. gPRC and TSO failures are allowed,
// but the TSO Proxy should not panic, blocked or deadlocked, and if it returns a timestamp, it should be a valid
// timestamp monotonic increasing. After the stress, the TSO Proxy should still work correctly.
-func TestTSOProxyStress(t *testing.T) {
+func TestTSOProxyStress(_ *testing.T) {
s := new(tsoProxyTestSuite)
s.SetT(&testing.T{})
s.SetupSuite()
@@ -154,7 +154,7 @@ func TestTSOProxyStress(t *testing.T) {
cleanupFuncs = append(cleanupFuncs, cleanupFuncsTemp...)
s.verifyTSOProxy(ctxTimeout, streams, cleanupFuncs, 50, false)
}
- s.cleanupGRPCStreams(cleanupFuncs)
+ cleanupGRPCStreams(cleanupFuncs)
log.Info("the stress test completed.")
// Verify the TSO Proxy can still work correctly after the stress.
@@ -192,7 +192,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyClientsWithSameContext() {
}
s.verifyTSOProxy(ctx, streams, cleanupFuncs, 100, true)
- s.cleanupGRPCStreams(cleanupFuncs)
+ cleanupGRPCStreams(cleanupFuncs)
}
// TestTSOProxyRecvFromClientTimeout tests the TSO Proxy can properly close the grpc stream on the server side
@@ -207,7 +207,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyRecvFromClientTimeout() {
time.Sleep(2 * time.Second)
err := streams[0].Send(s.defaultReq)
re.Error(err)
- s.cleanupGRPCStreams(cleanupFuncs)
+ cleanupGRPCStreams(cleanupFuncs)
re.NoError(failpoint.Disable("github.com/tikv/pd/server/tsoProxyRecvFromClientTimeout"))
// Verify the streams with no fault injection can work correctly.
@@ -226,7 +226,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyFailToSendToClient() { re.NoError(err) _, err = streams[0].Recv() re.Error(err) - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) re.NoError(failpoint.Disable("github.com/tikv/pd/server/tsoProxyFailToSendToClient")) s.verifyTSOProxy(s.ctx, s.streams, s.cleanupFuncs, 1, true) @@ -244,7 +244,7 @@ func (s *tsoProxyTestSuite) TestTSOProxySendToTSOTimeout() { re.NoError(err) _, err = streams[0].Recv() re.Error(err) - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) re.NoError(failpoint.Disable("github.com/tikv/pd/server/tsoProxySendToTSOTimeout")) s.verifyTSOProxy(s.ctx, s.streams, s.cleanupFuncs, 1, true) @@ -262,13 +262,13 @@ func (s *tsoProxyTestSuite) TestTSOProxyRecvFromTSOTimeout() { re.NoError(err) _, err = streams[0].Recv() re.Error(err) - s.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) re.NoError(failpoint.Disable("github.com/tikv/pd/server/tsoProxyRecvFromTSOTimeout")) s.verifyTSOProxy(s.ctx, s.streams, s.cleanupFuncs, 1, true) } -func (s *tsoProxyTestSuite) cleanupGRPCStreams(cleanupFuncs []testutil.CleanupFunc) { +func cleanupGRPCStreams(cleanupFuncs []testutil.CleanupFunc) { for i := 0; i < len(cleanupFuncs); i++ { if cleanupFuncs[i] != nil { cleanupFuncs[i]() @@ -277,7 +277,7 @@ func (s *tsoProxyTestSuite) cleanupGRPCStreams(cleanupFuncs []testutil.CleanupFu } } -func (s *tsoProxyTestSuite) cleanupGRPCStream( +func cleanupGRPCStream( streams []pdpb.PD_TsoClient, cleanupFuncs []testutil.CleanupFunc, index int, ) { if cleanupFuncs[index] != nil { @@ -318,7 +318,7 @@ func (s *tsoProxyTestSuite) verifyTSOProxy( for j := 0; j < requestsPerClient; j++ { select { case <-ctx.Done(): - s.cleanupGRPCStream(streams, cleanupFuncs, i) + cleanupGRPCStream(streams, cleanupFuncs, i) return default: } @@ -327,14 +327,14 @@ func (s *tsoProxyTestSuite) verifyTSOProxy( err := streams[i].Send(req) if err != nil && !mustReliable { respErr.Store(err) - s.cleanupGRPCStream(streams, cleanupFuncs, i) + cleanupGRPCStream(streams, cleanupFuncs, i) return } re.NoError(err) resp, err := streams[i].Recv() if err != nil && !mustReliable { respErr.Store(err) - s.cleanupGRPCStream(streams, cleanupFuncs, i) + cleanupGRPCStream(streams, cleanupFuncs, i) return } re.NoError(err) @@ -495,7 +495,7 @@ func benchmarkTSOProxyNClients(clientCount int, b *testing.B) { } b.StopTimer() - suite.cleanupGRPCStreams(cleanupFuncs) + cleanupGRPCStreams(cleanupFuncs) suite.TearDownSuite() } diff --git a/tests/integrations/realcluster/Makefile b/tests/integrations/realcluster/Makefile index 278f585feaa..e161d52a86e 100644 --- a/tests/integrations/realcluster/Makefile +++ b/tests/integrations/realcluster/Makefile @@ -22,8 +22,6 @@ static: install-tools @ gofmt -s -l -d . 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." @ golangci-lint run -c $(ROOT_PATH)/.golangci.yml --verbose ./... --allow-parallel-runners - @ echo "revive ..." - @ revive -formatter friendly -config $(ROOT_PATH)/revive.toml ./... 
tidy: @ go mod tidy diff --git a/tests/registry/registry_test.go b/tests/registry/registry_test.go index dab2ccae683..416a7420d2e 100644 --- a/tests/registry/registry_test.go +++ b/tests/registry/registry_test.go @@ -41,18 +41,18 @@ func TestMain(m *testing.M) { type testServiceRegistry struct { } -func (t *testServiceRegistry) RegisterGRPCService(g *grpc.Server) { +func (*testServiceRegistry) RegisterGRPCService(g *grpc.Server) { grpc_testing.RegisterTestServiceServer(g, &grpc_testing.UnimplementedTestServiceServer{}) } -func (t *testServiceRegistry) RegisterRESTHandler(userDefineHandlers map[string]http.Handler) { +func (*testServiceRegistry) RegisterRESTHandler(userDefineHandlers map[string]http.Handler) { group := apiutil.APIServiceGroup{ Name: "my-http-service", Version: "v1alpha1", IsCore: false, PathPrefix: "/my-service", } - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Write([]byte("Hello World!")) }) diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index b70c688993d..98e458f6d17 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -51,7 +51,7 @@ func TestReconnect(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.TickInterval = typeutil.Duration{Duration: 50 * time.Millisecond} conf.ElectionInterval = typeutil.Duration{Duration: 250 * time.Millisecond} }) @@ -577,7 +577,7 @@ func (suite *redirectorTestSuite) SetupSuite() { re := suite.Require() ctx, cancel := context.WithCancel(context.Background()) suite.cleanup = cancel - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.TickInterval = typeutil.Duration{Duration: 50 * time.Millisecond} conf.ElectionInterval = typeutil.Duration{Duration: 250 * time.Millisecond} }) @@ -703,7 +703,7 @@ func TestRemovingProgress(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) re.NoError(err) @@ -820,7 +820,7 @@ func TestSendApiWhenRestartRaftCluster(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) re.NoError(err) @@ -862,7 +862,7 @@ func TestPreparingProgress(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) 
re.NoError(err) diff --git a/tests/server/api/checker_test.go b/tests/server/api/checker_test.go index 198cfca216f..0304d7fd369 100644 --- a/tests/server/api/checker_test.go +++ b/tests/server/api/checker_test.go @@ -49,7 +49,7 @@ func (suite *checkerTestSuite) TestAPI() { func (suite *checkerTestSuite) checkAPI(cluster *tests.TestCluster) { re := suite.Require() - suite.testErrCases(re, cluster) + testErrCases(re, cluster) testCases := []struct { name string @@ -62,12 +62,12 @@ func (suite *checkerTestSuite) checkAPI(cluster *tests.TestCluster) { {name: "joint-state"}, } for _, testCase := range testCases { - suite.testGetStatus(re, cluster, testCase.name) - suite.testPauseOrResume(re, cluster, testCase.name) + testGetStatus(re, cluster, testCase.name) + testPauseOrResume(re, cluster, testCase.name) } } -func (suite *checkerTestSuite) testErrCases(re *require.Assertions, cluster *tests.TestCluster) { +func testErrCases(re *require.Assertions, cluster *tests.TestCluster) { urlPrefix := fmt.Sprintf("%s/pd/api/v1/checker", cluster.GetLeaderServer().GetAddr()) // missing args input := make(map[string]any) @@ -97,7 +97,7 @@ func (suite *checkerTestSuite) testErrCases(re *require.Assertions, cluster *tes re.NoError(err) } -func (suite *checkerTestSuite) testGetStatus(re *require.Assertions, cluster *tests.TestCluster, name string) { +func testGetStatus(re *require.Assertions, cluster *tests.TestCluster, name string) { input := make(map[string]any) urlPrefix := fmt.Sprintf("%s/pd/api/v1/checker", cluster.GetLeaderServer().GetAddr()) // normal run @@ -128,7 +128,7 @@ func (suite *checkerTestSuite) testGetStatus(re *require.Assertions, cluster *te re.False(resp["paused"].(bool)) } -func (suite *checkerTestSuite) testPauseOrResume(re *require.Assertions, cluster *tests.TestCluster, name string) { +func testPauseOrResume(re *require.Assertions, cluster *tests.TestCluster, name string) { input := make(map[string]any) urlPrefix := fmt.Sprintf("%s/pd/api/v1/checker", cluster.GetLeaderServer().GetAddr()) resp := make(map[string]any) diff --git a/tests/server/api/operator_test.go b/tests/server/api/operator_test.go index 32ca4ea300d..a5cd865b454 100644 --- a/tests/server/api/operator_test.go +++ b/tests/server/api/operator_test.go @@ -26,7 +26,6 @@ import ( "time" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/operator" @@ -56,7 +55,7 @@ func TestOperatorTestSuite(t *testing.T) { func (suite *operatorTestSuite) SetupSuite() { suite.env = tests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) } @@ -71,7 +70,7 @@ func (suite *operatorTestSuite) TestAddRemovePeer() { func (suite *operatorTestSuite) checkAddRemovePeer(cluster *tests.TestCluster) { re := suite.Require() - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) stores := []*metapb.Store{ { Id: 1, @@ -206,7 +205,7 @@ func (suite *operatorTestSuite) checkMergeRegionOperator(cluster *tests.TestClus tests.MustPutStore(re, cluster, store) } - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetWrittenBytes(1000), core.SetReadBytes(1000), core.SetRegionConfVer(1), core.SetRegionVersion(1)) tests.MustPutRegionInfo(re, cluster, r1) r2 := core.NewTestRegionInfo(20, 1, []byte("b"), []byte("c"), 
core.SetWrittenBytes(2000), core.SetReadBytes(0), core.SetRegionConfVer(2), core.SetRegionVersion(3)) @@ -233,7 +232,7 @@ func (suite *operatorTestSuite) checkMergeRegionOperator(cluster *tests.TestClus func (suite *operatorTestSuite) TestTransferRegionWithPlacementRule() { // use a new environment to avoid affecting other tests env := tests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 3 }) env.RunTestInTwoModes(suite.checkTransferRegionWithPlacementRule) @@ -242,7 +241,7 @@ func (suite *operatorTestSuite) TestTransferRegionWithPlacementRule() { func (suite *operatorTestSuite) checkTransferRegionWithPlacementRule(cluster *tests.TestCluster) { re := suite.Require() - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) stores := []*metapb.Store{ { Id: 1, @@ -513,7 +512,7 @@ func (suite *operatorTestSuite) checkTransferRegionWithPlacementRule(cluster *te func (suite *operatorTestSuite) TestGetOperatorsAsObject() { // use a new environment to avoid being affected by other tests env := tests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { conf.Replication.MaxReplicas = 1 }) env.RunTestInTwoModes(suite.checkGetOperatorsAsObject) @@ -522,7 +521,7 @@ func (suite *operatorTestSuite) TestGetOperatorsAsObject() { func (suite *operatorTestSuite) checkGetOperatorsAsObject(cluster *tests.TestCluster) { re := suite.Require() - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) stores := []*metapb.Store{ { Id: 1, @@ -612,19 +611,6 @@ func (suite *operatorTestSuite) checkGetOperatorsAsObject(cluster *tests.TestClu re.Equal("admin-add-peer", resp[2].Desc) } -// pauseRuleChecker will pause rule checker to avoid unexpected operator. 
-func (suite *operatorTestSuite) pauseRuleChecker(re *require.Assertions, cluster *tests.TestCluster) { - checkerName := "rule" - addr := cluster.GetLeaderServer().GetAddr() - resp := make(map[string]any) - url := fmt.Sprintf("%s/pd/api/v1/checker/%s", addr, checkerName) - err := tu.CheckPostJSON(testDialClient, url, []byte(`{"delay":1000}`), tu.StatusOK(re)) - re.NoError(err) - err = tu.ReadGetJSON(re, testDialClient, url, &resp) - re.NoError(err) - re.True(resp["paused"].(bool)) -} - func (suite *operatorTestSuite) TestRemoveOperators() { suite.env.RunTestInTwoModes(suite.checkRemoveOperators) } @@ -656,7 +642,7 @@ func (suite *operatorTestSuite) checkRemoveOperators(cluster *tests.TestCluster) tests.MustPutStore(re, cluster, store) } - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetWrittenBytes(1000), core.SetReadBytes(1000), core.SetRegionConfVer(1), core.SetRegionVersion(1)) tests.MustPutRegionInfo(re, cluster, r1) r2 := core.NewTestRegionInfo(20, 1, []byte("b"), []byte("c"), core.SetWrittenBytes(2000), core.SetReadBytes(0), core.SetRegionConfVer(2), core.SetRegionVersion(3)) diff --git a/tests/server/api/region_test.go b/tests/server/api/region_test.go index 8c286dc12e2..b233ce94a99 100644 --- a/tests/server/api/region_test.go +++ b/tests/server/api/region_test.go @@ -114,14 +114,14 @@ func (suite *regionTestSuite) checkSplitRegions(cluster *tests.TestCluster) { r1 := core.NewTestRegionInfo(601, 13, []byte("aaa"), []byte("ggg")) r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 5, StoreId: 14}, &metapb.Peer{Id: 6, StoreId: 15}) tests.MustPutRegionInfo(re, cluster, r1) - suite.checkRegionCount(re, cluster, 1) + checkRegionCount(re, cluster, 1) newRegionID := uint64(11) body := fmt.Sprintf(`{"retry_limit":%v, "split_keys": ["%s","%s","%s"]}`, 3, hex.EncodeToString([]byte("bbb")), hex.EncodeToString([]byte("ccc")), hex.EncodeToString([]byte("ddd"))) - checkOpt := func(res []byte, code int, _ http.Header) { + checkOpt := func(res []byte, _ int, _ http.Header) { s := &struct { ProcessedPercentage int `json:"processed-percentage"` NewRegionsID []uint64 `json:"regions-id"` @@ -159,7 +159,7 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRange(cluster *tes r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 100 + i, StoreId: (i + 1) % regionCount}, &metapb.Peer{Id: 200 + i, StoreId: (i + 2) % regionCount}) tests.MustPutRegionInfo(re, cluster, r1) } - suite.checkRegionCount(re, cluster, regionCount) + checkRegionCount(re, cluster, regionCount) body := fmt.Sprintf(`{"start_key":"%s", "end_key": "%s"}`, hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3"))) err := tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/regions/accelerate-schedule", urlPrefix), []byte(body), @@ -194,7 +194,7 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRanges(cluster *te r1.GetMeta().Peers = append(r1.GetMeta().Peers, &metapb.Peer{Id: 100 + i, StoreId: (i + 1) % regionCount}, &metapb.Peer{Id: 200 + i, StoreId: (i + 2) % regionCount}) tests.MustPutRegionInfo(re, cluster, r1) } - suite.checkRegionCount(re, cluster, regionCount) + checkRegionCount(re, cluster, regionCount) body := fmt.Sprintf(`[{"start_key":"%s", "end_key": "%s"}, {"start_key":"%s", "end_key": "%s"}]`, hex.EncodeToString([]byte("a1")), hex.EncodeToString([]byte("a3")), hex.EncodeToString([]byte("a4")), hex.EncodeToString([]byte("a6"))) @@ -236,7 +236,7 @@ func (suite *regionTestSuite) 
checkScatterRegions(cluster *tests.TestCluster) { tests.MustPutRegionInfo(re, cluster, r1) tests.MustPutRegionInfo(re, cluster, r2) tests.MustPutRegionInfo(re, cluster, r3) - suite.checkRegionCount(re, cluster, 3) + checkRegionCount(re, cluster, 3) body := fmt.Sprintf(`{"start_key":"%s", "end_key": "%s"}`, hex.EncodeToString([]byte("b1")), hex.EncodeToString([]byte("b3"))) err := tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/regions/scatter", urlPrefix), []byte(body), tu.StatusOK(re)) @@ -263,7 +263,7 @@ func (suite *regionTestSuite) TestCheckRegionsReplicated() { func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) { re := suite.Require() - suite.pauseRuleChecker(re, cluster) + pauseRuleChecker(re, cluster) leader := cluster.GetLeaderServer() urlPrefix := leader.GetAddr() + "/pd/api/v1" @@ -276,7 +276,7 @@ func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) tests.MustPutStore(re, cluster, s1) r1 := core.NewTestRegionInfo(2, 1, []byte("a"), []byte("b")) tests.MustPutRegionInfo(re, cluster, r1) - suite.checkRegionCount(re, cluster, 1) + checkRegionCount(re, cluster, 1) // set the bundle bundle := []placement.GroupBundle{ @@ -404,7 +404,7 @@ func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) }) } -func (suite *regionTestSuite) checkRegionCount(re *require.Assertions, cluster *tests.TestCluster, count uint64) { +func checkRegionCount(re *require.Assertions, cluster *tests.TestCluster, count uint64) { leader := cluster.GetLeaderServer() tu.Eventually(re, func() bool { return leader.GetRaftCluster().GetRegionCount([]byte{}, []byte{}).Count == int(count) @@ -417,7 +417,7 @@ func (suite *regionTestSuite) checkRegionCount(re *require.Assertions, cluster * } // pauseRuleChecker will pause rule checker to avoid unexpected operator. 
-func (suite *regionTestSuite) pauseRuleChecker(re *require.Assertions, cluster *tests.TestCluster) { +func pauseRuleChecker(re *require.Assertions, cluster *tests.TestCluster) { checkerName := "rule" addr := cluster.GetLeaderServer().GetAddr() resp := make(map[string]any) diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go index 83ab0f1cebb..4f60b5cfb28 100644 --- a/tests/server/api/rule_test.go +++ b/tests/server/api/rule_test.go @@ -49,7 +49,7 @@ func TestRuleTestSuite(t *testing.T) { } func (suite *ruleTestSuite) SetupSuite() { - suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { + suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, _ string) { conf.PDServerCfg.KeyType = "raw" conf.Replication.EnablePlacementRules = true }) @@ -235,7 +235,7 @@ func (suite *ruleTestSuite) checkGet(cluster *tests.TestCluster) { if testCase.found { tu.Eventually(re, func() bool { err = tu.ReadGetJSON(re, testDialClient, url, &resp) - return suite.compareRule(&resp, &testCase.rule) + return compareRule(&resp, &testCase.rule) }) } else { err = tu.CheckGetJSON(testDialClient, url, nil, tu.Status(re, testCase.code)) @@ -432,7 +432,7 @@ func (suite *ruleTestSuite) checkGetAllByGroup(cluster *tests.TestCluster) { return false } if testCase.count == 2 { - return suite.compareRule(resp[0], &rule) && suite.compareRule(resp[1], &rule1) + return compareRule(resp[0], &rule) && compareRule(resp[1], &rule1) } return true }) @@ -492,7 +492,7 @@ func (suite *ruleTestSuite) checkGetAllByRegion(cluster *tests.TestCluster) { err = tu.ReadGetJSON(re, testDialClient, url, &resp) for _, r := range resp { if r.GroupID == "e" { - return suite.compareRule(r, &rule) + return compareRule(r, &rule) } } return true @@ -780,7 +780,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { }, }, } - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1}, 1) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1}, 1) // Set b2 := placement.GroupBundle{ @@ -797,17 +797,17 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { re.NoError(err) // Get - suite.assertBundleEqual(re, urlPrefix+"/placement-rule/foo", b2) + assertBundleEqual(re, urlPrefix+"/placement-rule/foo", b2) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b2}, 2) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b2}, 2) // Delete err = tu.CheckDelete(testDialClient, urlPrefix+"/placement-rule/pd", tu.StatusOK(re)) re.NoError(err) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b2}, 1) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b2}, 1) // SetAll b2.Rules = append(b2.Rules, &placement.Rule{GroupID: "foo", ID: "baz", Index: 2, Role: placement.Follower, Count: 1}) @@ -819,14 +819,14 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { re.NoError(err) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b2, b3}, 3) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b2, b3}, 3) // Delete using regexp err = tu.CheckDelete(testDialClient, urlPrefix+"/placement-rule/"+url.PathEscape("foo.*")+"?regexp", tu.StatusOK(re)) re.NoError(err) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", 
[]placement.GroupBundle{b1}, 1) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1}, 1) // Set id := "rule-without-group-id" @@ -844,10 +844,10 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { b4.ID = id b4.Rules[0].GroupID = b4.ID // Get - suite.assertBundleEqual(re, urlPrefix+"/placement-rule/"+id, b4) + assertBundleEqual(re, urlPrefix+"/placement-rule/"+id, b4) // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b4}, 2) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b4}, 2) // SetAll b5 := placement.GroupBundle{ @@ -865,7 +865,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) { b5.Rules[0].GroupID = b5.ID // GetAll again - suite.assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b4, b5}, 3) + assertBundlesEqual(re, urlPrefix+"/placement-rule", []placement.GroupBundle{b1, b4, b5}, 3) } func (suite *ruleTestSuite) TestBundleBadRequest() { @@ -1194,18 +1194,18 @@ func (suite *ruleTestSuite) checkLargeRules(cluster *tests.TestCluster) { suite.postAndCheckRuleBundle(urlPrefix, genBundlesWithRulesNum(etcdutil.MaxEtcdTxnOps*2)) } -func (suite *ruleTestSuite) assertBundleEqual(re *require.Assertions, url string, expectedBundle placement.GroupBundle) { +func assertBundleEqual(re *require.Assertions, url string, expectedBundle placement.GroupBundle) { var bundle placement.GroupBundle tu.Eventually(re, func() bool { err := tu.ReadGetJSON(re, testDialClient, url, &bundle) if err != nil { return false } - return suite.compareBundle(bundle, expectedBundle) + return compareBundle(bundle, expectedBundle) }) } -func (suite *ruleTestSuite) assertBundlesEqual(re *require.Assertions, url string, expectedBundles []placement.GroupBundle, expectedLen int) { +func assertBundlesEqual(re *require.Assertions, url string, expectedBundles []placement.GroupBundle, expectedLen int) { var bundles []placement.GroupBundle tu.Eventually(re, func() bool { err := tu.ReadGetJSON(re, testDialClient, url, &bundles) @@ -1218,7 +1218,7 @@ func (suite *ruleTestSuite) assertBundlesEqual(re *require.Assertions, url strin sort.Slice(bundles, func(i, j int) bool { return bundles[i].ID < bundles[j].ID }) sort.Slice(expectedBundles, func(i, j int) bool { return expectedBundles[i].ID < expectedBundles[j].ID }) for i := range bundles { - if !suite.compareBundle(bundles[i], expectedBundles[i]) { + if !compareBundle(bundles[i], expectedBundles[i]) { return false } } @@ -1226,21 +1226,21 @@ func (suite *ruleTestSuite) assertBundlesEqual(re *require.Assertions, url strin }) } -func (suite *ruleTestSuite) compareBundle(b1, b2 placement.GroupBundle) bool { +func compareBundle(b1, b2 placement.GroupBundle) bool { if b2.ID != b1.ID || b2.Index != b1.Index || b2.Override != b1.Override || len(b2.Rules) != len(b1.Rules) { return false } sort.Slice(b1.Rules, func(i, j int) bool { return b1.Rules[i].ID < b1.Rules[j].ID }) sort.Slice(b2.Rules, func(i, j int) bool { return b2.Rules[i].ID < b2.Rules[j].ID }) for i := range b1.Rules { - if !suite.compareRule(b1.Rules[i], b2.Rules[i]) { + if !compareRule(b1.Rules[i], b2.Rules[i]) { return false } } return true } -func (suite *ruleTestSuite) compareRule(r1 *placement.Rule, r2 *placement.Rule) bool { +func compareRule(r1 *placement.Rule, r2 *placement.Rule) bool { return r2.GroupID == r1.GroupID && r2.ID == r1.ID && r2.StartKeyHex == r1.StartKeyHex && @@ -1267,7 +1267,7 @@ func (suite *ruleTestSuite) 
postAndCheckRuleBundle(urlPrefix string, bundle []pl sort.Slice(respBundle, func(i, j int) bool { return respBundle[i].ID < respBundle[j].ID }) sort.Slice(bundle, func(i, j int) bool { return bundle[i].ID < bundle[j].ID }) for i := range respBundle { - if !suite.compareBundle(respBundle[i], bundle[i]) { + if !compareBundle(respBundle[i], bundle[i]) { return false } } @@ -1285,7 +1285,7 @@ func TestRegionRuleTestSuite(t *testing.T) { } func (suite *regionRuleTestSuite) SetupSuite() { - suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, serverName string) { + suite.env = tests.NewSchedulingTestEnvironment(suite.T(), func(conf *config.Config, _ string) { conf.Replication.EnablePlacementRules = true conf.Replication.MaxReplicas = 1 }) @@ -1396,14 +1396,14 @@ func (suite *regionRuleTestSuite) checkRegionPlacementRule(cluster *tests.TestCl re.Equal("keyspaces/0", labels[0].ID) u = fmt.Sprintf("%s/config/region-label/rules/ids", urlPrefix) - err = tu.CheckGetJSON(testDialClient, u, []byte(`["rule1", "rule3"]`), func(resp []byte, statusCode int, _ http.Header) { + err = tu.CheckGetJSON(testDialClient, u, []byte(`["rule1", "rule3"]`), func(resp []byte, _ int, _ http.Header) { err := json.Unmarshal(resp, &labels) re.NoError(err) re.Empty(labels) }) re.NoError(err) - err = tu.CheckGetJSON(testDialClient, u, []byte(`["keyspaces/0"]`), func(resp []byte, statusCode int, _ http.Header) { + err = tu.CheckGetJSON(testDialClient, u, []byte(`["keyspaces/0"]`), func(resp []byte, _ int, _ http.Header) { err := json.Unmarshal(resp, &labels) re.NoError(err) re.Len(labels, 1) diff --git a/tests/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go index 2329077209d..4f71315803a 100644 --- a/tests/server/api/scheduler_test.go +++ b/tests/server/api/scheduler_test.go @@ -123,7 +123,7 @@ func (suite *scheduleTestSuite) checkOriginAPI(cluster *tests.TestCluster) { re.NoError(failpoint.Disable("github.com/tikv/pd/server/config/persistFail")) err = tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re)) re.NoError(err) - suite.assertNoScheduler(re, urlPrefix, "evict-leader-scheduler") + assertNoScheduler(re, urlPrefix, "evict-leader-scheduler") re.NoError(tu.CheckGetJSON(testDialClient, listURL, nil, tu.Status(re, http.StatusNotFound))) err = tu.CheckDelete(testDialClient, deleteURL, tu.Status(re, http.StatusNotFound)) re.NoError(err) @@ -531,8 +531,8 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if testCase.extraTestFunc != nil { testCase.extraTestFunc(testCase.createdName) } - suite.deleteScheduler(re, urlPrefix, testCase.createdName) - suite.assertNoScheduler(re, urlPrefix, testCase.createdName) + deleteScheduler(re, urlPrefix, testCase.createdName) + assertNoScheduler(re, urlPrefix, testCase.createdName) } // test pause and resume all schedulers. @@ -546,7 +546,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { } body, err := json.Marshal(input) re.NoError(err) - suite.addScheduler(re, urlPrefix, body) + addScheduler(re, urlPrefix, body) suite.assertSchedulerExists(urlPrefix, testCase.createdName) // wait for scheduler to be synced. 
if testCase.extraTestFunc != nil { testCase.extraTestFunc(testCase.createdName) @@ -566,7 +566,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if createdName == "" { createdName = testCase.name } - isPaused := suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused := isSchedulerPaused(re, urlPrefix, createdName) re.True(isPaused) } input["delay"] = 1 @@ -580,7 +580,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if createdName == "" { createdName = testCase.name } - isPaused := suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused := isSchedulerPaused(re, urlPrefix, createdName) re.False(isPaused) } @@ -600,7 +600,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if createdName == "" { createdName = testCase.name } - isPaused := suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused := isSchedulerPaused(re, urlPrefix, createdName) re.False(isPaused) } @@ -610,8 +610,8 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { if createdName == "" { createdName = testCase.name } - suite.deleteScheduler(re, urlPrefix, createdName) - suite.assertNoScheduler(re, urlPrefix, createdName) + deleteScheduler(re, urlPrefix, createdName) + assertNoScheduler(re, urlPrefix, createdName) } } @@ -638,7 +638,7 @@ func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) { input["name"] = name body, err := json.Marshal(input) re.NoError(err) - suite.addScheduler(re, urlPrefix, body) + addScheduler(re, urlPrefix, body) u := fmt.Sprintf("%s%s/api/v1/config/schedule", leaderAddr, apiPrefix) var scheduleConfig sc.ScheduleConfig @@ -652,7 +652,7 @@ func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) { err = tu.CheckPostJSON(testDialClient, u, body, tu.StatusOK(re)) re.NoError(err) - suite.assertNoScheduler(re, urlPrefix, name) + assertNoScheduler(re, urlPrefix, name) suite.assertSchedulerExists(fmt.Sprintf("%s?status=disabled", urlPrefix), name) // reset schedule config @@ -662,16 +662,16 @@ func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) { err = tu.CheckPostJSON(testDialClient, u, body, tu.StatusOK(re)) re.NoError(err) - suite.deleteScheduler(re, urlPrefix, name) - suite.assertNoScheduler(re, urlPrefix, name) + deleteScheduler(re, urlPrefix, name) + assertNoScheduler(re, urlPrefix, name) } -func (suite *scheduleTestSuite) addScheduler(re *require.Assertions, urlPrefix string, body []byte) { +func addScheduler(re *require.Assertions, urlPrefix string, body []byte) { err := tu.CheckPostJSON(testDialClient, urlPrefix, body, tu.StatusOK(re)) re.NoError(err) } -func (suite *scheduleTestSuite) deleteScheduler(re *require.Assertions, urlPrefix string, createdName string) { +func deleteScheduler(re *require.Assertions, urlPrefix string, createdName string) { deleteURL := fmt.Sprintf("%s/%s", urlPrefix, createdName) err := tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re)) re.NoError(err) @@ -696,7 +696,7 @@ func (suite *scheduleTestSuite) testPauseOrResume(re *require.Assertions, urlPre re.NoError(err) err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re)) re.NoError(err) - isPaused := suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused := isSchedulerPaused(re, urlPrefix, createdName) re.True(isPaused) input["delay"] = 1 pauseArgs, err = json.Marshal(input) @@ -704,7 +704,7 @@ func (suite *scheduleTestSuite) testPauseOrResume(re *require.Assertions, urlPre err = 
tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re)) re.NoError(err) time.Sleep(time.Second * 2) - isPaused = suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused = isSchedulerPaused(re, urlPrefix, createdName) re.False(isPaused) // test resume. @@ -719,7 +719,7 @@ func (suite *scheduleTestSuite) testPauseOrResume(re *require.Assertions, urlPre re.NoError(err) err = tu.CheckPostJSON(testDialClient, urlPrefix+"/"+createdName, pauseArgs, tu.StatusOK(re)) re.NoError(err) - isPaused = suite.isSchedulerPaused(re, urlPrefix, createdName) + isPaused = isSchedulerPaused(re, urlPrefix, createdName) re.False(isPaused) } @@ -749,9 +749,9 @@ func (suite *scheduleTestSuite) checkEmptySchedulers(cluster *tests.TestCluster) input["name"] = scheduler body, err := json.Marshal(input) re.NoError(err) - suite.addScheduler(re, urlPrefix, body) + addScheduler(re, urlPrefix, body) } else { - suite.deleteScheduler(re, urlPrefix, scheduler) + deleteScheduler(re, urlPrefix, scheduler) } } tu.Eventually(re, func() bool { @@ -777,7 +777,7 @@ func (suite *scheduleTestSuite) assertSchedulerExists(urlPrefix string, schedule }) } -func (suite *scheduleTestSuite) assertNoScheduler(re *require.Assertions, urlPrefix string, scheduler string) { +func assertNoScheduler(re *require.Assertions, urlPrefix string, scheduler string) { var schedulers []string tu.Eventually(re, func() bool { err := tu.ReadGetJSON(re, testDialClient, urlPrefix, &schedulers, @@ -787,7 +787,7 @@ func (suite *scheduleTestSuite) assertNoScheduler(re *require.Assertions, urlPre }) } -func (suite *scheduleTestSuite) isSchedulerPaused(re *require.Assertions, urlPrefix, name string) bool { +func isSchedulerPaused(re *require.Assertions, urlPrefix, name string) bool { var schedulers []string err := tu.ReadGetJSON(re, testDialClient, fmt.Sprintf("%s?status=paused", urlPrefix), &schedulers, tu.StatusOK(re)) diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index 3415c22a77b..aea5ff73968 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -824,7 +824,7 @@ func TestSetScheduleOpt(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // TODO: enable placementrules - tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, svr string) { cfg.Replication.EnablePlacementRules = false }) + tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Replication.EnablePlacementRules = false }) defer tc.Destroy() re.NoError(err) @@ -985,7 +985,7 @@ func TestTiFlashWithPlacementRules(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, name string) { cfg.Replication.EnablePlacementRules = false }) + tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Replication.EnablePlacementRules = false }) defer tc.Destroy() re.NoError(err) err = tc.RunInitialServers() @@ -1035,7 +1035,7 @@ func TestReplicationModeStatus(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tc, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.ReplicationMode.ReplicationMode = "dr-auto-sync" }) diff --git a/tests/server/config/config_test.go b/tests/server/config/config_test.go index 
108bc5fc753..b6fcecbd47b 100644 --- a/tests/server/config/config_test.go +++ b/tests/server/config/config_test.go @@ -451,7 +451,7 @@ type ttlConfigInterface interface { IsTikvRegionSplitEnabled() bool } -func (suite *configTestSuite) assertTTLConfig( +func assertTTLConfig( re *require.Assertions, cluster *tests.TestCluster, expectedEqual bool, @@ -488,7 +488,7 @@ func (suite *configTestSuite) assertTTLConfig( } } -func (suite *configTestSuite) assertTTLConfigItemEqual( +func assertTTLConfigItemEqual( re *require.Assertions, cluster *tests.TestCluster, item string, @@ -532,22 +532,22 @@ func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) { // test no config and cleaning up err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 0), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, false) + assertTTLConfig(re, cluster, false) // test time goes by err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 5), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, true) + assertTTLConfig(re, cluster, true) time.Sleep(5 * time.Second) - suite.assertTTLConfig(re, cluster, false) + assertTTLConfig(re, cluster, false) // test cleaning up err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 5), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, true) + assertTTLConfig(re, cluster, true) err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 0), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, false) + assertTTLConfig(re, cluster, false) postData, err = json.Marshal(invalidTTLConfig) re.NoError(err) @@ -564,9 +564,9 @@ func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) { err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 1), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfigItemEqual(re, cluster, "max-merge-region-size", uint64(999)) + assertTTLConfigItemEqual(re, cluster, "max-merge-region-size", uint64(999)) // max-merge-region-keys should keep consistence with max-merge-region-size. - suite.assertTTLConfigItemEqual(re, cluster, "max-merge-region-keys", uint64(999*10000)) + assertTTLConfigItemEqual(re, cluster, "max-merge-region-keys", uint64(999*10000)) // on invalid value, we use default config mergeConfig = map[string]any{ @@ -576,7 +576,7 @@ func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) { re.NoError(err) err = tu.CheckPostJSON(testDialClient, createTTLUrl(urlPrefix, 10), postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfigItemEqual(re, cluster, "enable-tikv-split-region", true) + assertTTLConfigItemEqual(re, cluster, "enable-tikv-split-region", true) } func (suite *configTestSuite) TestTTLConflict() { @@ -592,7 +592,7 @@ func (suite *configTestSuite) checkTTLConflict(cluster *tests.TestCluster) { re.NoError(err) err = tu.CheckPostJSON(testDialClient, addr, postData, tu.StatusOK(re)) re.NoError(err) - suite.assertTTLConfig(re, cluster, true) + assertTTLConfig(re, cluster, true) cfg := map[string]any{"max-snapshot-count": 30} postData, err = json.Marshal(cfg) diff --git a/tests/server/join/join_test.go b/tests/server/join/join_test.go index 5cdcbc090b8..32e66c27589 100644 --- a/tests/server/join/join_test.go +++ b/tests/server/join/join_test.go @@ -105,7 +105,7 @@ func TestFailedAndDeletedPDJoinsPreviousCluster(t *testing.T) { re.NoError(err) // The server should not successfully start. 
- res := cluster.RunServer(pd3) + res := tests.RunServer(pd3) re.Error(<-res) members, err := etcdutil.ListEtcdMembers(ctx, client) @@ -138,7 +138,7 @@ func TestDeletedPDJoinsPreviousCluster(t *testing.T) { re.NoError(err) // The server should not successfully start. - res := cluster.RunServer(pd3) + res := tests.RunServer(pd3) re.Error(<-res) members, err := etcdutil.ListEtcdMembers(ctx, client) diff --git a/tests/server/keyspace/keyspace_test.go b/tests/server/keyspace/keyspace_test.go index aa2e89296bb..d6e188359ce 100644 --- a/tests/server/keyspace/keyspace_test.go +++ b/tests/server/keyspace/keyspace_test.go @@ -53,7 +53,7 @@ func (suite *keyspaceTestSuite) SetupTest() { re := suite.Require() ctx, cancel := context.WithCancel(context.Background()) suite.cancel = cancel - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = preAllocKeyspace }) suite.cluster = cluster diff --git a/tests/server/member/member_test.go b/tests/server/member/member_test.go index 7aadc2772e8..92ed11a75ce 100644 --- a/tests/server/member/member_test.go +++ b/tests/server/member/member_test.go @@ -152,7 +152,7 @@ func TestLeaderPriority(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.LeaderPriorityCheckInterval = typeutil.NewDuration(time.Second) }) defer cluster.Destroy() diff --git a/tests/server/region_syncer/region_syncer_test.go b/tests/server/region_syncer/region_syncer_test.go index 1470173e0ed..f82346571ef 100644 --- a/tests/server/region_syncer/region_syncer_test.go +++ b/tests/server/region_syncer/region_syncer_test.go @@ -40,7 +40,7 @@ func TestRegionSyncer(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/syncer/noFastExitSync", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/syncer/disableClientStreaming", `return(true)`)) - cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.PDServerCfg.UseRegionStorage = true }) defer func() { cluster.Destroy() cancel() @@ -163,7 +163,7 @@ func TestFullSyncWithAddMember(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.PDServerCfg.UseRegionStorage = true }) defer cluster.Destroy() re.NoError(err) @@ -207,7 +207,7 @@ func TestPrepareChecker(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker", `return(true)`)) - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.PDServerCfg.UseRegionStorage = true }) defer cluster.Destroy() re.NoError(err) @@ -256,7 +256,7 @@ func 
TestPrepareCheckerWithTransferLeader(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker", `return(true)`)) - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.PDServerCfg.UseRegionStorage = true }) defer cluster.Destroy() re.NoError(err) diff --git a/tests/server/server_test.go b/tests/server/server_test.go index 3b85cd3cf0d..adf7202454b 100644 --- a/tests/server/server_test.go +++ b/tests/server/server_test.go @@ -98,7 +98,7 @@ func TestClusterID(t *testing.T) { re.Equal(clusterID, s.GetClusterID()) } - cluster2, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.InitialClusterToken = "foobar" }) + cluster2, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, _ string) { conf.InitialClusterToken = "foobar" }) defer cluster2.Destroy() re.NoError(err) err = cluster2.RunInitialServers() diff --git a/tests/server/storage/hot_region_storage_test.go b/tests/server/storage/hot_region_storage_test.go index 12110be0249..b63b533bc0f 100644 --- a/tests/server/storage/hot_region_storage_test.go +++ b/tests/server/storage/hot_region_storage_test.go @@ -37,7 +37,7 @@ func TestHotRegionStorage(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, - func(cfg *config.Config, serverName string) { + func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 cfg.Schedule.HotRegionsWriteInterval.Duration = 1000 * time.Millisecond cfg.Schedule.HotRegionsReservedDays = 1 @@ -145,7 +145,7 @@ func TestHotRegionStorageReservedDayConfigChange(t *testing.T) { interval := 100 * time.Millisecond defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, - func(cfg *config.Config, serverName string) { + func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 cfg.Schedule.HotRegionsWriteInterval.Duration = interval cfg.Schedule.HotRegionsReservedDays = 1 @@ -237,7 +237,7 @@ func TestHotRegionStorageWriteIntervalConfigChange(t *testing.T) { interval := 100 * time.Millisecond defer cancel() cluster, err := tests.NewTestCluster(ctx, 1, - func(cfg *config.Config, serverName string) { + func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 cfg.Schedule.HotRegionsWriteInterval.Duration = interval cfg.Schedule.HotRegionsReservedDays = 1 diff --git a/tests/server/tso/allocator_test.go b/tests/server/tso/allocator_test.go index 3bc4d56ac58..692aec490eb 100644 --- a/tests/server/tso/allocator_test.go +++ b/tests/server/tso/allocator_test.go @@ -132,7 +132,7 @@ func TestPriorityAndDifferentLocalTSO(t *testing.T) { time.Sleep(time.Second * 5) // Join a new dc-location - pd4, err := cluster.Join(ctx, func(conf *config.Config, serverName string) { + pd4, err := cluster.Join(ctx, func(conf *config.Config, _ string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = "dc-4" }) diff --git a/tests/server/tso/consistency_test.go b/tests/server/tso/consistency_test.go index d1c45df7f17..1bf20cce20d 100644 --- a/tests/server/tso/consistency_test.go +++ b/tests/server/tso/consistency_test.go @@ -275,7 +275,7 @@ func (suite *tsoConsistencyTestSuite) TestLocalTSOAfterMemberChanged() { 
re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/tso/systemTimeSlow", `return(true)`)) // Join a new dc-location - pd4, err := cluster.Join(suite.ctx, func(conf *config.Config, serverName string) { + pd4, err := cluster.Join(suite.ctx, func(conf *config.Config, _ string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = "dc-4" }) diff --git a/tests/server/tso/global_tso_test.go b/tests/server/tso/global_tso_test.go index 5ae2e6e0f67..f705bdf12b5 100644 --- a/tests/server/tso/global_tso_test.go +++ b/tests/server/tso/global_tso_test.go @@ -137,7 +137,7 @@ func TestLogicalOverflow(t *testing.T) { runCase := func(updateInterval time.Duration) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.TSOUpdatePhysicalInterval = typeutil.Duration{Duration: updateInterval} }) defer cluster.Destroy() diff --git a/tests/server/watch/leader_watch_test.go b/tests/server/watch/leader_watch_test.go index f7765297023..84e16398677 100644 --- a/tests/server/watch/leader_watch_test.go +++ b/tests/server/watch/leader_watch_test.go @@ -35,7 +35,7 @@ func TestWatcher(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.AutoCompactionRetention = "1s" }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.AutoCompactionRetention = "1s" }) defer cluster.Destroy() re.NoError(err) @@ -73,7 +73,7 @@ func TestWatcherCompacted(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.AutoCompactionRetention = "1s" }) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.AutoCompactionRetention = "1s" }) defer cluster.Destroy() re.NoError(err) diff --git a/tests/testutil.go b/tests/testutil.go index 106cddc9dfb..5d9905af64c 100644 --- a/tests/testutil.go +++ b/tests/testutil.go @@ -273,14 +273,14 @@ func (s *SchedulingTestEnvironment) RunTestInTwoModes(test func(*TestCluster)) { // RunTestInPDMode is to run test in pd mode. 
func (s *SchedulingTestEnvironment) RunTestInPDMode(test func(*TestCluster)) { - s.t.Logf("start test %s in pd mode", s.getTestName()) + s.t.Logf("start test %s in pd mode", getTestName()) if _, ok := s.clusters[pdMode]; !ok { s.startCluster(pdMode) } test(s.clusters[pdMode]) } -func (s *SchedulingTestEnvironment) getTestName() string { +func getTestName() string { pc, _, _, _ := runtime.Caller(2) caller := runtime.FuncForPC(pc) if caller == nil || strings.Contains(caller.Name(), "RunTestInTwoModes") { @@ -303,7 +303,7 @@ func (s *SchedulingTestEnvironment) RunTestInAPIMode(test func(*TestCluster)) { re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember")) re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) }() - s.t.Logf("start test %s in api mode", s.getTestName()) + s.t.Logf("start test %s in api mode", getTestName()) if _, ok := s.clusters[apiMode]; !ok { s.startCluster(apiMode) } diff --git a/tests/tso_cluster.go b/tests/tso_cluster.go index 4021613df2a..e1fdb6d69ca 100644 --- a/tests/tso_cluster.go +++ b/tests/tso_cluster.go @@ -76,7 +76,7 @@ func RestartTestTSOCluster( defer wg.Done() clean() serverCfg := cluster.servers[addr].GetConfig() - newServer, newCleanup, err := NewTSOTestServer(newCluster.ctx, serverCfg) + newServer, newCleanup, err := NewTSOTestServer(ctx, serverCfg) serverMap.Store(addr, newServer) cleanupMap.Store(addr, newCleanup) errorMap.Store(addr, err) diff --git a/tools.go b/tools.go index 909f42ab9b5..e5298de2827 100644 --- a/tools.go +++ b/tools.go @@ -20,7 +20,6 @@ package tools import ( _ "github.com/AlekSi/gocov-xml" _ "github.com/axw/gocov/gocov" - _ "github.com/mgechev/revive" _ "github.com/pingcap/errors/errdoc-gen" _ "github.com/pingcap/failpoint/failpoint-ctl" _ "github.com/swaggo/swag/cmd/swag" diff --git a/tools/Makefile b/tools/Makefile index 336cc536949..4195160aff6 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -25,8 +25,6 @@ static: install-tools @ gofmt -s -l -d . 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." @ golangci-lint run -c $(ROOT_PATH)/.golangci.yml --verbose ./... --allow-parallel-runners - @ echo "revive ..." - @ revive -formatter friendly -config $(ROOT_PATH)/revive.toml ./... tidy: @ go mod tidy diff --git a/tools/pd-analysis/analysis/parse_log.go b/tools/pd-analysis/analysis/parse_log.go index 44ae617284f..f096e3fe380 100644 --- a/tools/pd-analysis/analysis/parse_log.go +++ b/tools/pd-analysis/analysis/parse_log.go @@ -42,7 +42,7 @@ type Interpreter interface { } // CompileRegex is to provide regexp for transfer counter. 
-func (c *TransferCounter) CompileRegex(operator string) (*regexp.Regexp, error) { +func (*TransferCounter) CompileRegex(operator string) (*regexp.Regexp, error) { var r *regexp.Regexp var err error @@ -64,7 +64,7 @@ func (c *TransferCounter) CompileRegex(operator string) (*regexp.Regexp, error) return r, err } -func (c *TransferCounter) parseLine(content string, r *regexp.Regexp) ([]uint64, error) { +func parseLine(content string, r *regexp.Regexp) ([]uint64, error) { results := make([]uint64, 0, 4) subStrings := r.FindStringSubmatch(content) if len(subStrings) == 0 { @@ -78,9 +78,8 @@ func (c *TransferCounter) parseLine(content string, r *regexp.Regexp) ([]uint64, results = append(results, uint64(num)) } return results, nil - } else { - return results, errors.New("Can't parse Log, with " + content) } + return results, errors.New("Can't parse Log, with " + content) } func forEachLine(filename string, solve func(string) error) error { @@ -116,7 +115,7 @@ func forEachLine(filename string, solve func(string) error) error { func isExpectTime(expect, layout string, isBeforeThanExpect bool) func(time.Time) bool { expectTime, err := time.Parse(layout, expect) if err != nil { - return func(current time.Time) bool { + return func(_ time.Time) bool { return true } } @@ -142,14 +141,13 @@ func currentTime(layout string) func(content string) (time.Time, error) { return time.Parse(layout, result[1]) } else if len(result) == 0 { return time.Time{}, nil - } else { - return time.Time{}, errors.New("There is no valid time in log with " + content) } + return time.Time{}, errors.New("There is no valid time in log with " + content) } } // ParseLog is to parse log for transfer counter. -func (c *TransferCounter) ParseLog(filename, start, end, layout string, r *regexp.Regexp) error { +func (*TransferCounter) ParseLog(filename, start, end, layout string, r *regexp.Regexp) error { afterStart := isExpectTime(start, layout, false) beforeEnd := isExpectTime(end, layout, true) getCurrent := currentTime(layout) @@ -161,7 +159,7 @@ func (c *TransferCounter) ParseLog(filename, start, end, layout string, r *regex } // if current line time between start and end if afterStart(current) && beforeEnd(current) { - results, err := c.parseLine(content, r) + results, err := parseLine(content, r) if err != nil { return err } diff --git a/tools/pd-analysis/analysis/parse_log_test.go b/tools/pd-analysis/analysis/parse_log_test.go index ffdcb2137c0..345f70959f8 100644 --- a/tools/pd-analysis/analysis/parse_log_test.go +++ b/tools/pd-analysis/analysis/parse_log_test.go @@ -23,7 +23,7 @@ import ( func transferCounterParseLog(operator, content string, expect []uint64) bool { r, _ := GetTransferCounter().CompileRegex(operator) - results, _ := GetTransferCounter().parseLine(content, r) + results, _ := parseLine(content, r) if len(results) != len(expect) { return false } diff --git a/tools/pd-api-bench/cases/cases.go b/tools/pd-api-bench/cases/cases.go index 473a11d749a..787132a5816 100644 --- a/tools/pd-api-bench/cases/cases.go +++ b/tools/pd-api-bench/cases/cases.go @@ -246,7 +246,7 @@ func newUpdateGCSafePoint() func() GRPCCase { } } -func (c *updateGCSafePoint) Unary(ctx context.Context, cli pd.Client) error { +func (*updateGCSafePoint) Unary(ctx context.Context, cli pd.Client) error { s := time.Now().Unix() _, err := cli.UpdateGCSafePoint(ctx, uint64(s)) if err != nil { @@ -270,7 +270,7 @@ func newUpdateServiceGCSafePoint() func() GRPCCase { } } -func (c *updateServiceGCSafePoint) Unary(ctx context.Context, cli pd.Client) error { 
+func (*updateServiceGCSafePoint) Unary(ctx context.Context, cli pd.Client) error { s := time.Now().Unix() id := rand.Int63n(100) + 1 _, err := cli.UpdateServiceGCSafePoint(ctx, strconv.FormatInt(id, 10), id, uint64(s)) @@ -295,7 +295,7 @@ func newGetRegion() func() GRPCCase { } } -func (c *getRegion) Unary(ctx context.Context, cli pd.Client) error { +func (*getRegion) Unary(ctx context.Context, cli pd.Client) error { id := rand.Intn(totalRegion)*4 + 1 _, err := cli.GetRegion(ctx, generateKeyForSimulator(id, 56)) if err != nil { @@ -319,7 +319,7 @@ func newGetRegionEnableFollower() func() GRPCCase { } } -func (c *getRegionEnableFollower) Unary(ctx context.Context, cli pd.Client) error { +func (*getRegionEnableFollower) Unary(ctx context.Context, cli pd.Client) error { id := rand.Intn(totalRegion)*4 + 1 _, err := cli.GetRegion(ctx, generateKeyForSimulator(id, 56), pd.WithAllowFollowerHandle()) if err != nil { @@ -372,7 +372,7 @@ func newTso() func() GRPCCase { } } -func (c *tso) Unary(ctx context.Context, cli pd.Client) error { +func (*tso) Unary(ctx context.Context, cli pd.Client) error { _, _, err := cli.GetTS(ctx) if err != nil { return err @@ -395,7 +395,7 @@ func newGetStore() func() GRPCCase { } } -func (c *getStore) Unary(ctx context.Context, cli pd.Client) error { +func (*getStore) Unary(ctx context.Context, cli pd.Client) error { storeIdx := rand.Intn(totalStore) _, err := cli.GetStore(ctx, storesID[storeIdx]) if err != nil { @@ -419,7 +419,7 @@ func newGetStores() func() GRPCCase { } } -func (c *getStores) Unary(ctx context.Context, cli pd.Client) error { +func (*getStores) Unary(ctx context.Context, cli pd.Client) error { _, err := cli.GetAllStores(ctx) if err != nil { return err @@ -449,7 +449,7 @@ func newGetKV() func() ETCDCase { } } -func (c *getKV) Init(ctx context.Context, cli *clientv3.Client) error { +func (*getKV) Init(ctx context.Context, cli *clientv3.Client) error { for i := 0; i < 100; i++ { _, err := cli.Put(ctx, fmt.Sprintf("/test/0001/%4d", i), fmt.Sprintf("%4d", i)) if err != nil { @@ -459,7 +459,7 @@ func (c *getKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } -func (c *getKV) Unary(ctx context.Context, cli *clientv3.Client) error { +func (*getKV) Unary(ctx context.Context, cli *clientv3.Client) error { _, err := cli.Get(ctx, "/test/0001", clientv3.WithPrefix()) return err } @@ -479,9 +479,9 @@ func newPutKV() func() ETCDCase { } } -func (c *putKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } +func (*putKV) Init(context.Context, *clientv3.Client) error { return nil } -func (c *putKV) Unary(ctx context.Context, cli *clientv3.Client) error { +func (*putKV) Unary(ctx context.Context, cli *clientv3.Client) error { _, err := cli.Put(ctx, "/test/0001/0000", "test") return err } @@ -501,9 +501,9 @@ func newDeleteKV() func() ETCDCase { } } -func (c *deleteKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } +func (*deleteKV) Init(context.Context, *clientv3.Client) error { return nil } -func (c *deleteKV) Unary(ctx context.Context, cli *clientv3.Client) error { +func (*deleteKV) Unary(ctx context.Context, cli *clientv3.Client) error { _, err := cli.Delete(ctx, "/test/0001/0000") return err } @@ -523,9 +523,9 @@ func newTxnKV() func() ETCDCase { } } -func (c *txnKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } +func (*txnKV) Init(context.Context, *clientv3.Client) error { return nil } -func (c *txnKV) Unary(ctx context.Context, cli *clientv3.Client) error { +func (*txnKV) Unary(ctx 
context.Context, cli *clientv3.Client) error { txn := cli.Txn(ctx) txn = txn.If(clientv3.Compare(clientv3.Value("/test/0001/0000"), "=", "test")) txn = txn.Then(clientv3.OpPut("/test/0001/0000", "test2")) diff --git a/tools/pd-api-bench/cases/controller.go b/tools/pd-api-bench/cases/controller.go index d6679cad1d9..42eeafe4597 100644 --- a/tools/pd-api-bench/cases/controller.go +++ b/tools/pd-api-bench/cases/controller.go @@ -64,7 +64,7 @@ func (c *Coordinator) GetHTTPCase(name string) (*Config, error) { if controller, ok := c.http[name]; ok { return controller.GetConfig(), nil } - return nil, errors.Errorf("case %v does not exist.", name) + return nil, errors.Errorf("case %v does not exist", name) } // GetGRPCCase returns the gRPC case config. @@ -74,7 +74,7 @@ func (c *Coordinator) GetGRPCCase(name string) (*Config, error) { if controller, ok := c.grpc[name]; ok { return controller.GetConfig(), nil } - return nil, errors.Errorf("case %v does not exist.", name) + return nil, errors.Errorf("case %v does not exist", name) } // GetETCDCase returns the etcd case config. @@ -84,7 +84,7 @@ func (c *Coordinator) GetETCDCase(name string) (*Config, error) { if controller, ok := c.etcd[name]; ok { return controller.GetConfig(), nil } - return nil, errors.Errorf("case %v does not exist.", name) + return nil, errors.Errorf("case %v does not exist", name) } // GetAllHTTPCases returns the all HTTP case configs. diff --git a/tools/pd-api-bench/config/config.go b/tools/pd-api-bench/config/config.go index 675e665ab0a..d1048c0da72 100644 --- a/tools/pd-api-bench/config/config.go +++ b/tools/pd-api-bench/config/config.go @@ -15,7 +15,6 @@ package config import ( - "github.com/BurntSushi/toml" "github.com/pingcap/log" "github.com/pkg/errors" flag "github.com/spf13/pflag" @@ -73,14 +72,13 @@ func (c *Config) Parse(arguments []string) error { } // Load config file if specified. - var meta *toml.MetaData if c.configFile != "" { - meta, err = configutil.ConfigFromFile(c, c.configFile) + _, err = configutil.ConfigFromFile(c, c.configFile) if err != nil { return err } } - c.Adjust(meta) + c.Adjust() // Parse again to replace with command line options. 
err = c.flagSet.Parse(arguments) @@ -118,7 +116,7 @@ func (c *Config) InitCoordinator(co *cases.Coordinator) { } // Adjust is used to adjust configurations -func (c *Config) Adjust(meta *toml.MetaData) { +func (c *Config) Adjust() { if len(c.Log.Format) == 0 { c.Log.Format = "text" } diff --git a/tools/pd-backup/pdbackup/backup_test.go b/tools/pd-backup/pdbackup/backup_test.go index b35bf1e8a70..0ab9116ddbe 100644 --- a/tools/pd-backup/pdbackup/backup_test.go +++ b/tools/pd-backup/pdbackup/backup_test.go @@ -83,7 +83,7 @@ func setupServer() (*httptest.Server, *config.Config) { }, } - server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, _ *http.Request) { b, err := json.Marshal(serverConfig) if err != nil { res.WriteHeader(http.StatusInternalServerError) @@ -98,7 +98,7 @@ func setupServer() (*httptest.Server, *config.Config) { return server, serverConfig } -func (s *backupTestSuite) BeforeTest(suiteName, testName string) { +func (s *backupTestSuite) BeforeTest(string, string) { re := s.Require() ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) defer cancel() @@ -124,7 +124,7 @@ func (s *backupTestSuite) BeforeTest(suiteName, testName string) { re.NoError(err) } -func (s *backupTestSuite) AfterTest(suiteName, testName string) { +func (s *backupTestSuite) AfterTest(string, string) { s.etcd.Close() } diff --git a/tools/pd-ctl/pdctl/command/config_command.go b/tools/pd-ctl/pdctl/command/config_command.go index c70c33e26c3..0c3851350cc 100644 --- a/tools/pd-ctl/pdctl/command/config_command.go +++ b/tools/pd-ctl/pdctl/command/config_command.go @@ -212,7 +212,7 @@ func NewDeleteLabelPropertyConfigCommand() *cobra.Command { return sc } -func showConfigCommandFunc(cmd *cobra.Command, args []string) { +func showConfigCommandFunc(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) allR, err := doRequest(cmd, configPrefix, http.MethodGet, header) if err != nil { @@ -268,7 +268,7 @@ var hideConfig = []string{ "scheduler-max-waiting-operator", } -func showScheduleConfigCommandFunc(cmd *cobra.Command, args []string) { +func showScheduleConfigCommandFunc(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) r, err := doRequest(cmd, schedulePrefix, http.MethodGet, header) if err != nil { @@ -278,7 +278,7 @@ func showScheduleConfigCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showReplicationConfigCommandFunc(cmd *cobra.Command, args []string) { +func showReplicationConfigCommandFunc(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) r, err := doRequest(cmd, replicatePrefix, http.MethodGet, header) if err != nil { @@ -288,7 +288,7 @@ func showReplicationConfigCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showLabelPropertyConfigCommandFunc(cmd *cobra.Command, args []string) { +func showLabelPropertyConfigCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, labelPropertyPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get config: %s\n", err) @@ -297,7 +297,7 @@ func showLabelPropertyConfigCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showAllConfigCommandFunc(cmd *cobra.Command, args []string) { +func showAllConfigCommandFunc(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) r, err := doRequest(cmd, configPrefix, http.MethodGet, header) if err != nil { @@ -307,7 +307,7 @@ func showAllConfigCommandFunc(cmd 
*cobra.Command, args []string) { cmd.Println(r) } -func showClusterVersionCommandFunc(cmd *cobra.Command, args []string) { +func showClusterVersionCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, clusterVersionPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get cluster version: %s\n", err) @@ -316,7 +316,7 @@ func showClusterVersionCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showReplicationModeCommandFunc(cmd *cobra.Command, args []string) { +func showReplicationModeCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, replicationModePrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get replication mode config: %s\n", err) @@ -325,7 +325,7 @@ func showReplicationModeCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showServerCommandFunc(cmd *cobra.Command, args []string) { +func showServerCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, pdServerPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get server config: %s\n", err) @@ -529,7 +529,7 @@ func NewPlacementRulesCommand() *cobra.Command { return c } -func enablePlacementRulesFunc(cmd *cobra.Command, args []string) { +func enablePlacementRulesFunc(cmd *cobra.Command, _ []string) { err := postConfigDataWithPath(cmd, "enable-placement-rules", "true", configPrefix) if err != nil { cmd.Printf("Failed to set config: %s\n", err) @@ -538,7 +538,7 @@ func enablePlacementRulesFunc(cmd *cobra.Command, args []string) { cmd.Println("Success!") } -func disablePlacementRulesFunc(cmd *cobra.Command, args []string) { +func disablePlacementRulesFunc(cmd *cobra.Command, _ []string) { err := postConfigDataWithPath(cmd, "enable-placement-rules", "false", configPrefix) if err != nil { cmd.Printf("Failed to set config: %s\n", err) @@ -547,7 +547,7 @@ func disablePlacementRulesFunc(cmd *cobra.Command, args []string) { cmd.Println("Success!") } -func getPlacementRulesFunc(cmd *cobra.Command, args []string) { +func getPlacementRulesFunc(cmd *cobra.Command, _ []string) { getFlag := func(key string) string { if f := cmd.Flag(key); f != nil { return f.Value.String() @@ -598,7 +598,7 @@ func getPlacementRulesFunc(cmd *cobra.Command, args []string) { cmd.Println("rules saved to file " + file) } -func putPlacementRulesFunc(cmd *cobra.Command, args []string) { +func putPlacementRulesFunc(cmd *cobra.Command, _ []string) { var file string if f := cmd.Flag("in"); f != nil { file = f.Value.String() @@ -712,7 +712,7 @@ func getRuleBundle(cmd *cobra.Command, args []string) { cmd.Printf("rule group saved to file %s\n", file) } -func setRuleBundle(cmd *cobra.Command, args []string) { +func setRuleBundle(cmd *cobra.Command, _ []string) { var file string if f := cmd.Flag("in"); f != nil { file = f.Value.String() @@ -763,7 +763,7 @@ func delRuleBundle(cmd *cobra.Command, args []string) { cmd.Println(res) } -func loadRuleBundle(cmd *cobra.Command, args []string) { +func loadRuleBundle(cmd *cobra.Command, _ []string) { header := buildHeader(cmd) res, err := doRequest(cmd, ruleBundlePrefix, http.MethodGet, header) if err != nil { @@ -788,7 +788,7 @@ func loadRuleBundle(cmd *cobra.Command, args []string) { cmd.Printf("rule group saved to file %s\n", file) } -func saveRuleBundle(cmd *cobra.Command, args []string) { +func saveRuleBundle(cmd *cobra.Command, _ []string) { var file string if f := cmd.Flag("in"); f != nil { file = f.Value.String() diff --git 
a/tools/pd-ctl/pdctl/command/exit_command.go b/tools/pd-ctl/pdctl/command/exit_command.go index a3d38be97bd..3ead7e54e8e 100644 --- a/tools/pd-ctl/pdctl/command/exit_command.go +++ b/tools/pd-ctl/pdctl/command/exit_command.go @@ -30,6 +30,6 @@ func NewExitCommand() *cobra.Command { return conf } -func exitCommandFunc(cmd *cobra.Command, args []string) { +func exitCommandFunc(*cobra.Command, []string) { os.Exit(0) } diff --git a/tools/pd-ctl/pdctl/command/gc_safepoint_command.go b/tools/pd-ctl/pdctl/command/gc_safepoint_command.go index 80c6328e955..f4a6b6fcfd0 100644 --- a/tools/pd-ctl/pdctl/command/gc_safepoint_command.go +++ b/tools/pd-ctl/pdctl/command/gc_safepoint_command.go @@ -49,7 +49,7 @@ func NewDeleteServiceGCSafepointCommand() *cobra.Command { return l } -func showSSPs(cmd *cobra.Command, args []string) { +func showSSPs(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, serviceGCSafepointPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get service GC safepoint: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/global.go b/tools/pd-ctl/pdctl/command/global.go index 4f20b0b35b4..fa77df6a101 100644 --- a/tools/pd-ctl/pdctl/command/global.go +++ b/tools/pd-ctl/pdctl/command/global.go @@ -126,7 +126,7 @@ var dialClient = &http.Client{ } // RequireHTTPSClient creates a HTTPS client if the related flags are set -func RequireHTTPSClient(cmd *cobra.Command, args []string) error { +func RequireHTTPSClient(cmd *cobra.Command, _ []string) error { caPath, err := cmd.Flags().GetString("cacert") if err == nil && len(caPath) != 0 { certPath, err := cmd.Flags().GetString("cert") diff --git a/tools/pd-ctl/pdctl/command/health_command.go b/tools/pd-ctl/pdctl/command/health_command.go index 1bae871285d..50ac7763d28 100644 --- a/tools/pd-ctl/pdctl/command/health_command.go +++ b/tools/pd-ctl/pdctl/command/health_command.go @@ -34,7 +34,7 @@ func NewHealthCommand() *cobra.Command { return m } -func showHealthCommandFunc(cmd *cobra.Command, args []string) { +func showHealthCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, healthPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Println(err) diff --git a/tools/pd-ctl/pdctl/command/hot_command.go b/tools/pd-ctl/pdctl/command/hot_command.go index f6be9c7176b..77c0ee4d7de 100644 --- a/tools/pd-ctl/pdctl/command/hot_command.go +++ b/tools/pd-ctl/pdctl/command/hot_command.go @@ -107,7 +107,7 @@ func NewHotStoreCommand() *cobra.Command { return cmd } -func showHotStoresCommandFunc(cmd *cobra.Command, args []string) { +func showHotStoresCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, hotStoresPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get store hotspot: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/label_command.go b/tools/pd-ctl/pdctl/command/label_command.go index c0ae3135210..6d95465392f 100644 --- a/tools/pd-ctl/pdctl/command/label_command.go +++ b/tools/pd-ctl/pdctl/command/label_command.go @@ -53,7 +53,7 @@ func NewLabelListStoresCommand() *cobra.Command { return l } -func showLabelsCommandFunc(cmd *cobra.Command, args []string) { +func showLabelsCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, labelsPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get labels: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/member_command.go b/tools/pd-ctl/pdctl/command/member_command.go index c16a879429c..b939935cfb9 100644 --- a/tools/pd-ctl/pdctl/command/member_command.go +++ 
b/tools/pd-ctl/pdctl/command/member_command.go @@ -89,7 +89,7 @@ func NewLeaderMemberCommand() *cobra.Command { return d } -func showMemberCommandFunc(cmd *cobra.Command, args []string) { +func showMemberCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, membersPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get pd members: %s\n", err) @@ -126,7 +126,7 @@ func deleteMemberByIDCommandFunc(cmd *cobra.Command, args []string) { cmd.Println("Success!") } -func getLeaderMemberCommandFunc(cmd *cobra.Command, args []string) { +func getLeaderMemberCommandFunc(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, leaderMemberPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get the leader of pd members: %s\n", err) @@ -135,7 +135,7 @@ func getLeaderMemberCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func resignLeaderCommandFunc(cmd *cobra.Command, args []string) { +func resignLeaderCommandFunc(cmd *cobra.Command, _ []string) { prefix := leaderMemberPrefix + "/resign" _, err := doRequest(cmd, prefix, http.MethodPost, http.Header{}) if err != nil { diff --git a/tools/pd-ctl/pdctl/command/min_resolved_ts.go b/tools/pd-ctl/pdctl/command/min_resolved_ts.go index dbf0c47b2de..904f880d82d 100644 --- a/tools/pd-ctl/pdctl/command/min_resolved_ts.go +++ b/tools/pd-ctl/pdctl/command/min_resolved_ts.go @@ -35,7 +35,7 @@ func NewMinResolvedTSCommand() *cobra.Command { } // ShowMinResolvedTS show min resolved ts -func ShowMinResolvedTS(cmd *cobra.Command, args []string) { +func ShowMinResolvedTS(cmd *cobra.Command, _ []string) { r, err := doRequest(cmd, minResolvedTSPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get min resolved ts: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/operator.go b/tools/pd-ctl/pdctl/command/operator.go index c57e07db75a..4e7771580de 100644 --- a/tools/pd-ctl/pdctl/command/operator.go +++ b/tools/pd-ctl/pdctl/command/operator.go @@ -375,7 +375,6 @@ func splitRegionCommandFunc(cmd *cobra.Command, args []string) { policy := cmd.Flags().Lookup("policy").Value.String() switch policy { case "scan", "approximate", "usekey": - break default: cmd.Println("Error: unknown policy") return diff --git a/tools/pd-ctl/pdctl/command/ping_command.go b/tools/pd-ctl/pdctl/command/ping_command.go index 6622b079d47..7efa46180d1 100644 --- a/tools/pd-ctl/pdctl/command/ping_command.go +++ b/tools/pd-ctl/pdctl/command/ping_command.go @@ -33,7 +33,7 @@ func NewPingCommand() *cobra.Command { return m } -func showPingCommandFunc(cmd *cobra.Command, args []string) { +func showPingCommandFunc(cmd *cobra.Command, _ []string) { start := time.Now() _, err := doRequest(cmd, pingPrefix, http.MethodGet, http.Header{}) if err != nil { diff --git a/tools/pd-ctl/pdctl/command/region_command.go b/tools/pd-ctl/pdctl/command/region_command.go index 33191bbe12b..e03de1c62ac 100644 --- a/tools/pd-ctl/pdctl/command/region_command.go +++ b/tools/pd-ctl/pdctl/command/region_command.go @@ -156,7 +156,7 @@ func showRegionCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func scanRegionCommandFunc(cmd *cobra.Command, args []string) { +func scanRegionCommandFunc(cmd *cobra.Command, _ []string) { const limit = 1024 var key []byte for { @@ -533,7 +533,7 @@ func NewRangesWithRangeHolesCommand() *cobra.Command { return r } -func showRangesWithRangeHolesCommandFunc(cmd *cobra.Command, args []string) { +func showRangesWithRangeHolesCommandFunc(cmd *cobra.Command, _ []string) { r, err := 
doRequest(cmd, regionsRangeHolesPrefix, http.MethodGet, http.Header{}) if err != nil { cmd.Printf("Failed to get range holes: %s\n", err) diff --git a/tools/pd-ctl/pdctl/command/scheduler.go b/tools/pd-ctl/pdctl/command/scheduler.go index 3799c4a820e..d5deba670ad 100644 --- a/tools/pd-ctl/pdctl/command/scheduler.go +++ b/tools/pd-ctl/pdctl/command/scheduler.go @@ -391,7 +391,7 @@ func NewSlowTrendEvictLeaderSchedulerCommand() *cobra.Command { return c } -func addSchedulerForSplitBucketCommandFunc(cmd *cobra.Command, args []string) { +func addSchedulerForSplitBucketCommandFunc(cmd *cobra.Command, _ []string) { input := make(map[string]any) input["name"] = cmd.Name() postJSON(cmd, schedulersPrefix, input) diff --git a/tools/pd-ctl/pdctl/command/store_command.go b/tools/pd-ctl/pdctl/command/store_command.go index 085483cc5df..bc024d5a2e6 100644 --- a/tools/pd-ctl/pdctl/command/store_command.go +++ b/tools/pd-ctl/pdctl/command/store_command.go @@ -675,7 +675,7 @@ func storeCheckCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func showStoresCommandFunc(cmd *cobra.Command, args []string) { +func showStoresCommandFunc(cmd *cobra.Command, _ []string) { prefix := storesPrefix r, err := doRequest(cmd, prefix, http.MethodGet, http.Header{}) if err != nil { @@ -706,7 +706,7 @@ func showAllStoresLimitCommandFunc(cmd *cobra.Command, args []string) { cmd.Println(r) } -func removeTombStoneCommandFunc(cmd *cobra.Command, args []string) { +func removeTombStoneCommandFunc(cmd *cobra.Command, _ []string) { prefix := path.Join(storesPrefix, "remove-tombstone") _, err := doRequest(cmd, prefix, http.MethodDelete, http.Header{}) if err != nil { diff --git a/tools/pd-ctl/pdctl/command/unsafe_command.go b/tools/pd-ctl/pdctl/command/unsafe_command.go index 66ef8e6c934..04d272385e7 100644 --- a/tools/pd-ctl/pdctl/command/unsafe_command.go +++ b/tools/pd-ctl/pdctl/command/unsafe_command.go @@ -106,7 +106,7 @@ func removeFailedStoresCommandFunc(cmd *cobra.Command, args []string) { postJSON(cmd, prefix, postInput) } -func removeFailedStoresShowCommandFunc(cmd *cobra.Command, args []string) { +func removeFailedStoresShowCommandFunc(cmd *cobra.Command, _ []string) { var resp string var err error prefix := fmt.Sprintf("%s/remove-failed-stores/show", unsafePrefix) diff --git a/tools/pd-ctl/pdctl/ctl.go b/tools/pd-ctl/pdctl/ctl.go index 5790911d79f..f8eaff5e76e 100644 --- a/tools/pd-ctl/pdctl/ctl.go +++ b/tools/pd-ctl/pdctl/ctl.go @@ -86,7 +86,7 @@ func MainStart(args []string) { // TODO: deprecated rootCmd.Flags().BoolP("detach", "d", true, "Run pdctl without readline.") - rootCmd.Run = func(cmd *cobra.Command, args []string) { + rootCmd.Run = func(cmd *cobra.Command, _ []string) { if v, err := cmd.Flags().GetBool("version"); err == nil && v { versioninfo.Print() return diff --git a/tools/pd-ctl/tests/config/config_test.go b/tools/pd-ctl/tests/config/config_test.go index 6776c9851b3..07a7c2aa990 100644 --- a/tools/pd-ctl/tests/config/config_test.go +++ b/tools/pd-ctl/tests/config/config_test.go @@ -568,7 +568,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) re.Contains(string(output), "Success!") // test show - suite.checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) + checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) f, _ := os.CreateTemp("/tmp", "pd_tests") fname := f.Name() @@ -576,7 +576,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) defer 
os.RemoveAll(fname) // test load - rules := suite.checkLoadRule(re, pdAddr, fname, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) + rules := checkLoadRule(re, pdAddr, fname, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}) // test save rules = append(rules, placement.Rule{ @@ -596,11 +596,11 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) re.NoError(err) // test show group - suite.checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}, {placement.DefaultGroupID, "test1"}}, "--group=pd") + checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}, {placement.DefaultGroupID, "test1"}}, "--group=pd") // test rule region detail pdTests.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b")) - suite.checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}, "--region=1", "--detail") + checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, placement.DefaultRuleID}}, "--region=1", "--detail") // test delete // need clear up args, so create new a cobra.Command. Otherwise gourp still exists. @@ -609,7 +609,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) os.WriteFile(fname, b, 0600) _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "save", "--in="+fname) re.NoError(err) - suite.checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, "test1"}}, "--group=pd") + checkShowRuleKey(re, pdAddr, [][2]string{{placement.DefaultGroupID, "test1"}}, "--group=pd") } func (suite *configTestSuite) TestPlacementRuleGroups() { @@ -724,7 +724,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus defer os.RemoveAll(fname) // test load - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: placement.DefaultGroupID, Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -736,7 +736,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus re.NoError(os.WriteFile(fname, b, 0600)) _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname) re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: placement.DefaultGroupID, Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -745,7 +745,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", placement.DefaultGroupID) re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -757,7 +757,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus 
re.NoError(os.WriteFile(fname, b, 0600)) _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "set", "--in="+fname) re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -768,7 +768,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus bundles := []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, } - suite.checkLoadRuleBundle(re, pdAddr, fname, bundles) + checkLoadRuleBundle(re, pdAddr, fname, bundles) // test save bundle.Rules = []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}} @@ -778,7 +778,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus re.NoError(os.WriteFile(fname, b, 0600)) _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname) re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pe", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pe", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -791,7 +791,7 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "save", "--in="+fname, "--partial") re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pf", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pf", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) @@ -810,12 +810,12 @@ func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestClus _, err = tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "rule-bundle", "delete", "--regexp", ".*f") re.NoError(err) - suite.checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ + checkLoadRuleBundle(re, pdAddr, fname, []placement.GroupBundle{ {ID: "pd", Index: 0, Override: false, Rules: []*placement.Rule{{GroupID: "pd", ID: placement.DefaultRuleID, Role: placement.Voter, Count: 3}}}, }) } -func (suite *configTestSuite) checkLoadRuleBundle(re *require.Assertions, pdAddr string, fname string, expectValues []placement.GroupBundle) { +func checkLoadRuleBundle(re *require.Assertions, pdAddr string, fname string, expectValues []placement.GroupBundle) { var bundles []placement.GroupBundle cmd := ctl.GetRootCmd() testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server @@ -828,7 +828,7 @@ func (suite *configTestSuite) checkLoadRuleBundle(re *require.Assertions, pdAddr assertBundles(re, bundles, expectValues) } -func (suite *configTestSuite) checkLoadRule(re *require.Assertions, pdAddr string, fname string, expectValues 
[][2]string) []placement.Rule { +func checkLoadRule(re *require.Assertions, pdAddr string, fname string, expectValues [][2]string) []placement.Rule { var rules []placement.Rule cmd := ctl.GetRootCmd() testutil.Eventually(re, func() bool { // wait for the config to be synced to the scheduling server @@ -844,7 +844,7 @@ func (suite *configTestSuite) checkLoadRule(re *require.Assertions, pdAddr strin return rules } -func (suite *configTestSuite) checkShowRuleKey(re *require.Assertions, pdAddr string, expectValues [][2]string, opts ...string) { +func checkShowRuleKey(re *require.Assertions, pdAddr string, expectValues [][2]string, opts ...string) { var ( rules []placement.Rule fit placement.RegionFit diff --git a/tools/pd-ctl/tests/global_test.go b/tools/pd-ctl/tests/global_test.go index 14b7aafdccd..f4f55e2af89 100644 --- a/tools/pd-ctl/tests/global_test.go +++ b/tools/pd-ctl/tests/global_test.go @@ -34,7 +34,7 @@ const pdControlCallerID = "pd-ctl" func TestSendAndGetComponent(t *testing.T) { re := require.New(t) - handler := func(ctx context.Context, s *server.Server) (http.Handler, apiutil.APIServiceGroup, error) { + handler := func(context.Context, *server.Server) (http.Handler, apiutil.APIServiceGroup, error) { mux := http.NewServeMux() mux.HandleFunc("/pd/api/v1/health", func(w http.ResponseWriter, r *http.Request) { callerID := apiutil.GetCallerIDOnHTTP(r) diff --git a/tools/pd-ctl/tests/hot/hot_test.go b/tools/pd-ctl/tests/hot/hot_test.go index 9d8dbbd123a..7661704aa41 100644 --- a/tools/pd-ctl/tests/hot/hot_test.go +++ b/tools/pd-ctl/tests/hot/hot_test.go @@ -51,7 +51,7 @@ func TestHotTestSuite(t *testing.T) { func (suite *hotTestSuite) SetupSuite() { suite.env = pdTests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { conf.Schedule.MaxStoreDownTime.Duration = time.Hour conf.Schedule.HotRegionCacheHitsThreshold = 0 }, @@ -398,7 +398,7 @@ func TestHistoryHotRegions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := pdTests.NewTestCluster(ctx, 1, - func(cfg *config.Config, serverName string) { + func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 cfg.Schedule.HotRegionsWriteInterval.Duration = 1000 * time.Millisecond cfg.Schedule.HotRegionsReservedDays = 1 @@ -520,7 +520,7 @@ func TestBuckets(t *testing.T) { statistics.Denoising = false ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := pdTests.NewTestCluster(ctx, 1, func(cfg *config.Config, serverName string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 }) + cluster, err := pdTests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Schedule.HotRegionCacheHitsThreshold = 0 }) re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() diff --git a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go index 5d85f35dacf..87fd17a97d4 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go +++ b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go @@ -100,7 +100,7 @@ func TestSplitKeyspaceGroup(t *testing.T) { for i := 0; i < 129; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -155,7 +155,7 @@ func 
TestExternalAllocNodeWhenStart(t *testing.T) { for i := 0; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -195,7 +195,7 @@ func TestSetNodeAndPriorityKeyspaceGroup(t *testing.T) { for i := 0; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -299,7 +299,7 @@ func TestMergeKeyspaceGroup(t *testing.T) { for i := 0; i < 129; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -418,7 +418,7 @@ func TestKeyspaceGroupState(t *testing.T) { for i := 0; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) @@ -509,7 +509,7 @@ func TestShowKeyspaceGroupPrimary(t *testing.T) { for i := 0; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) diff --git a/tools/pd-ctl/tests/keyspace/keyspace_test.go b/tools/pd-ctl/tests/keyspace/keyspace_test.go index 4c1fb2aadd5..54c25fc2099 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_test.go +++ b/tools/pd-ctl/tests/keyspace/keyspace_test.go @@ -47,7 +47,7 @@ func TestKeyspace(t *testing.T) { for i := 1; i < 10; i++ { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } - tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, serverName string) { + tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { conf.Keyspace.PreAlloc = keyspaces }) re.NoError(err) diff --git a/tools/pd-ctl/tests/label/label_test.go b/tools/pd-ctl/tests/label/label_test.go index 9ba6f267ae1..f7370a71872 100644 --- a/tools/pd-ctl/tests/label/label_test.go +++ b/tools/pd-ctl/tests/label/label_test.go @@ -34,7 +34,7 @@ func TestLabel(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := pdTests.NewTestCluster(ctx, 1, func(cfg *config.Config, serverName string) { cfg.Replication.StrictlyMatchLabel = false }) + cluster, err := pdTests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Replication.StrictlyMatchLabel = false }) re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() diff --git a/tools/pd-ctl/tests/operator/operator_test.go b/tools/pd-ctl/tests/operator/operator_test.go index 5af73184076..7e5d390c4ce 100644 --- a/tools/pd-ctl/tests/operator/operator_test.go +++ b/tools/pd-ctl/tests/operator/operator_test.go @@ -43,7 +43,7 @@ func TestOperatorTestSuite(t 
*testing.T) { func (suite *operatorTestSuite) SetupSuite() { suite.env = pdTests.NewSchedulingTestEnvironment(suite.T(), - func(conf *config.Config, serverName string) { + func(conf *config.Config, _ string) { // TODO: enable placement rules conf.Replication.MaxReplicas = 2 conf.Replication.EnablePlacementRules = false diff --git a/tools/pd-ctl/tests/store/store_test.go b/tools/pd-ctl/tests/store/store_test.go index b8cd5c13a79..afb97401168 100644 --- a/tools/pd-ctl/tests/store/store_test.go +++ b/tools/pd-ctl/tests/store/store_test.go @@ -581,7 +581,7 @@ func TestStoreTLS(t *testing.T) { CertFile: filepath.Join(certPath, "pd-server.pem"), TrustedCAFile: filepath.Join(certPath, "ca.pem"), } - cluster, err := pdTests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + cluster, err := pdTests.NewTestCluster(ctx, 1, func(conf *config.Config, _ string) { conf.Security.TLSConfig = grpcutil.TLSConfig{ KeyPath: tlsInfo.KeyFile, CertPath: tlsInfo.CertFile, diff --git a/tools/pd-heartbeat-bench/main.go b/tools/pd-heartbeat-bench/main.go index 44d1b001269..ec5e2506e6b 100644 --- a/tools/pd-heartbeat-bench/main.go +++ b/tools/pd-heartbeat-bench/main.go @@ -192,7 +192,7 @@ type Regions struct { updateFlow []int } -func (rs *Regions) init(cfg *config.Config, options *config.Options) { +func (rs *Regions) init(cfg *config.Config) { rs.regions = make([]*pdpb.RegionHeartbeatRequest, 0, cfg.RegionCount) rs.updateRound = 0 @@ -507,7 +507,7 @@ func main() { initClusterID(ctx, cli) go runHTTPServer(cfg, options) regions := new(Regions) - regions.init(cfg, options) + regions.init(cfg) log.Info("finish init regions") stores := newStores(cfg.StoreCount) stores.update(regions) diff --git a/tools/pd-simulator/simulator/cases/add_nodes.go b/tools/pd-simulator/simulator/cases/add_nodes.go index 833ead89f53..241b34a9473 100644 --- a/tools/pd-simulator/simulator/cases/add_nodes.go +++ b/tools/pd-simulator/simulator/cases/add_nodes.go @@ -55,7 +55,7 @@ func newAddNodes() *Case { } threshold := 0.05 - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := true leaderCounts := make([]int, 0, storeNum) regionCounts := make([]int, 0, storeNum) diff --git a/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go b/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go index 410d5e984c7..59b0b54e1ca 100644 --- a/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go +++ b/tools/pd-simulator/simulator/cases/add_nodes_dynamic.go @@ -73,7 +73,7 @@ func newAddNodesDynamic() *Case { simCase.Events = []EventDescriptor{e} threshold := 0.05 - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := numNodes == storeNum leaderCounts := make([]int, 0, numNodes) regionCounts := make([]int, 0, numNodes) diff --git a/tools/pd-simulator/simulator/cases/balance_leader.go b/tools/pd-simulator/simulator/cases/balance_leader.go index 8f2b87e3180..bbc7ce97f68 100644 --- a/tools/pd-simulator/simulator/cases/balance_leader.go +++ b/tools/pd-simulator/simulator/cases/balance_leader.go @@ -51,7 +51,7 @@ func newBalanceLeader() *Case { } threshold := 0.05 - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := true leaderCounts := make([]int, 0, storeNum) for i := 1; i <= storeNum; i++ { diff 
--git a/tools/pd-simulator/simulator/cases/balance_region.go b/tools/pd-simulator/simulator/cases/balance_region.go index 0a013cf3876..3b0c46f1670 100644 --- a/tools/pd-simulator/simulator/cases/balance_region.go +++ b/tools/pd-simulator/simulator/cases/balance_region.go @@ -59,7 +59,7 @@ func newRedundantBalanceRegion() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) diff --git a/tools/pd-simulator/simulator/cases/delete_nodes.go b/tools/pd-simulator/simulator/cases/delete_nodes.go index 33f7ada14a0..4ba8e5064a4 100644 --- a/tools/pd-simulator/simulator/cases/delete_nodes.go +++ b/tools/pd-simulator/simulator/cases/delete_nodes.go @@ -72,7 +72,7 @@ func newDeleteNodes() *Case { simCase.Events = []EventDescriptor{e} threshold := 0.05 - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := numNodes == noEmptyStoreNum leaderCounts := make([]int, 0, numNodes) regionCounts := make([]int, 0, numNodes) diff --git a/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go b/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go index bd056bdf9c1..7fa50e56197 100644 --- a/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go +++ b/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go @@ -62,7 +62,7 @@ func newLabelNotMatch1() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) @@ -128,7 +128,7 @@ func newLabelIsolation1() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) @@ -189,7 +189,7 @@ func newLabelIsolation2() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) diff --git a/tools/pd-simulator/simulator/cases/diagnose_rule.go b/tools/pd-simulator/simulator/cases/diagnose_rule.go index 6cd76c854b7..15c5942d810 100644 --- a/tools/pd-simulator/simulator/cases/diagnose_rule.go +++ b/tools/pd-simulator/simulator/cases/diagnose_rule.go @@ -100,7 +100,7 @@ func newRule1() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) @@ -179,7 +179,7 @@ 
func newRule2() *Case { storesLastUpdateTime := make([]int64, storeNum+1) storeLastAvailable := make([]uint64, storeNum+1) - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(_ *core.RegionsInfo, stats []info.StoreStats) bool { res := true curTime := time.Now().Unix() storesAvailable := make([]uint64, 0, storeNum+1) diff --git a/tools/pd-simulator/simulator/cases/event_inner.go b/tools/pd-simulator/simulator/cases/event_inner.go index 3edf26b72a5..72521584e88 100644 --- a/tools/pd-simulator/simulator/cases/event_inner.go +++ b/tools/pd-simulator/simulator/cases/event_inner.go @@ -25,7 +25,7 @@ type WriteFlowOnSpotDescriptor struct { } // Type implements the EventDescriptor interface. -func (w *WriteFlowOnSpotDescriptor) Type() string { +func (*WriteFlowOnSpotDescriptor) Type() string { return "write-flow-on-spot" } @@ -35,7 +35,7 @@ type WriteFlowOnRegionDescriptor struct { } // Type implements the EventDescriptor interface. -func (w *WriteFlowOnRegionDescriptor) Type() string { +func (*WriteFlowOnRegionDescriptor) Type() string { return "write-flow-on-region" } @@ -45,7 +45,7 @@ type ReadFlowOnRegionDescriptor struct { } // Type implements the EventDescriptor interface. -func (w *ReadFlowOnRegionDescriptor) Type() string { +func (*ReadFlowOnRegionDescriptor) Type() string { return "read-flow-on-region" } @@ -55,7 +55,7 @@ type AddNodesDescriptor struct { } // Type implements the EventDescriptor interface. -func (w *AddNodesDescriptor) Type() string { +func (*AddNodesDescriptor) Type() string { return "add-nodes" } @@ -65,6 +65,6 @@ type DeleteNodesDescriptor struct { } // Type implements the EventDescriptor interface. -func (w *DeleteNodesDescriptor) Type() string { +func (*DeleteNodesDescriptor) Type() string { return "delete-nodes" } diff --git a/tools/pd-simulator/simulator/cases/hot_read.go b/tools/pd-simulator/simulator/cases/hot_read.go index 9df4f8796e8..d4ec6831d95 100644 --- a/tools/pd-simulator/simulator/cases/hot_read.go +++ b/tools/pd-simulator/simulator/cases/hot_read.go @@ -67,12 +67,12 @@ func newHotRead() *Case { } } e := &ReadFlowOnRegionDescriptor{} - e.Step = func(tick int64) map[uint64]int64 { + e.Step = func(int64) map[uint64]int64 { return readFlow } simCase.Events = []EventDescriptor{e} // Checker description - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { leaderCount := make([]int, storeNum) for id := range readFlow { leaderStore := regions.GetRegion(id).GetLeader().GetStoreId() diff --git a/tools/pd-simulator/simulator/cases/hot_write.go b/tools/pd-simulator/simulator/cases/hot_write.go index 8efe32c5657..8428afa75b5 100644 --- a/tools/pd-simulator/simulator/cases/hot_write.go +++ b/tools/pd-simulator/simulator/cases/hot_write.go @@ -66,14 +66,14 @@ func newHotWrite() *Case { } } e := &WriteFlowOnRegionDescriptor{} - e.Step = func(tick int64) map[uint64]int64 { + e.Step = func(int64) map[uint64]int64 { return writeFlow } simCase.Events = []EventDescriptor{e} // Checker description - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { leaderCount := make([]int, storeNum) peerCount := make([]int, storeNum) for id := range writeFlow { diff --git a/tools/pd-simulator/simulator/cases/import_data.go b/tools/pd-simulator/simulator/cases/import_data.go index 0e7f7770a48..6cf3b79a736 100644 --- 
a/tools/pd-simulator/simulator/cases/import_data.go +++ b/tools/pd-simulator/simulator/cases/import_data.go @@ -78,7 +78,7 @@ func newImportData() *Case { checkCount := uint64(0) var newRegionCount [][3]int var allRegionCount [][3]int - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { leaderDist := make(map[uint64]int) peerDist := make(map[uint64]int) leaderTotal := 0 diff --git a/tools/pd-simulator/simulator/cases/makeup_down_replica.go b/tools/pd-simulator/simulator/cases/makeup_down_replica.go index 57eb2dd1f53..86c9b4cac1d 100644 --- a/tools/pd-simulator/simulator/cases/makeup_down_replica.go +++ b/tools/pd-simulator/simulator/cases/makeup_down_replica.go @@ -64,7 +64,7 @@ func newMakeupDownReplicas() *Case { } simCase.Events = []EventDescriptor{e} - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { sum := 0 regionCounts := make([]int, 0, storeNum) for i := 1; i <= storeNum; i++ { diff --git a/tools/pd-simulator/simulator/cases/region_merge.go b/tools/pd-simulator/simulator/cases/region_merge.go index 501803d439e..3d5d57f804f 100644 --- a/tools/pd-simulator/simulator/cases/region_merge.go +++ b/tools/pd-simulator/simulator/cases/region_merge.go @@ -54,7 +54,7 @@ func newRegionMerge() *Case { // Checker description threshold := 0.05 mergeRatio := 4 // when max-merge-region-size is 20, per region will reach 40MB - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { sum := 0 regionCounts := make([]int, 0, storeNum) for i := 1; i <= storeNum; i++ { diff --git a/tools/pd-simulator/simulator/cases/region_split.go b/tools/pd-simulator/simulator/cases/region_split.go index 6a69386cb6b..b85cd319494 100644 --- a/tools/pd-simulator/simulator/cases/region_split.go +++ b/tools/pd-simulator/simulator/cases/region_split.go @@ -48,7 +48,7 @@ func newRegionSplit() *Case { simCase.RegionSplitKeys = 10000 // Events description e := &WriteFlowOnSpotDescriptor{} - e.Step = func(tick int64) map[string]int64 { + e.Step = func(int64) map[string]int64 { return map[string]int64{ "foobar": 8 * units.MiB, } @@ -56,7 +56,7 @@ func newRegionSplit() *Case { simCase.Events = []EventDescriptor{e} // Checker description - simCase.Checker = func(regions *core.RegionsInfo, stats []info.StoreStats) bool { + simCase.Checker = func(regions *core.RegionsInfo, _ []info.StoreStats) bool { res := true regionCounts := make([]int, 0, storeNum) for i := 1; i <= storeNum; i++ { diff --git a/tools/pd-simulator/simulator/client.go b/tools/pd-simulator/simulator/client.go index 81453307afa..808c991e97f 100644 --- a/tools/pd-simulator/simulator/client.go +++ b/tools/pd-simulator/simulator/client.go @@ -380,7 +380,7 @@ func (c *client) StoreHeartbeat(ctx context.Context, stats *pdpb.StoreStats) err return nil } -func (c *client) RegionHeartbeat(ctx context.Context, region *core.RegionInfo) error { +func (c *client) RegionHeartbeat(_ context.Context, region *core.RegionInfo) error { c.reportRegionHeartbeatCh <- region return nil } diff --git a/tools/pd-simulator/simulator/task.go b/tools/pd-simulator/simulator/task.go index b1c609b503d..a19854b53ba 100644 --- a/tools/pd-simulator/simulator/task.go +++ b/tools/pd-simulator/simulator/task.go @@ -261,7 +261,7 @@ type transferLeader struct { toPeers 
[]*metapb.Peer } -func (t *transferLeader) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (t *transferLeader) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { isFinished = true toPeer := t.toPeers[0] // TODO: Support selection logic if peer := region.GetPeer(toPeer.GetId()); peer == nil || peer.GetRole() != toPeer.GetRole() || core.IsLearner(peer) { @@ -313,7 +313,7 @@ type promoteLearner struct { peer *metapb.Peer } -func (pl *promoteLearner) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (pl *promoteLearner) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { isFinished = true peer := region.GetPeer(pl.peer.GetId()) opts := checkAndCreateChangePeerOption(region, peer, metapb.PeerRole_Learner, metapb.PeerRole_Voter) @@ -327,7 +327,7 @@ type demoteVoter struct { peer *metapb.Peer } -func (dv *demoteVoter) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (dv *demoteVoter) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { isFinished = true peer := region.GetPeer(dv.peer.GetId()) opts := checkAndCreateChangePeerOption(region, peer, metapb.PeerRole_Voter, metapb.PeerRole_Learner) @@ -342,7 +342,7 @@ type changePeerV2Enter struct { demoteVoters []*metapb.Peer } -func (ce *changePeerV2Enter) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (ce *changePeerV2Enter) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { isFinished = true var opts []core.RegionCreateOption for _, pl := range ce.promoteLearners { @@ -367,7 +367,7 @@ func (ce *changePeerV2Enter) tick(engine *RaftEngine, region *core.RegionInfo) ( type changePeerV2Leave struct{} -func (cl *changePeerV2Leave) tick(engine *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { +func (*changePeerV2Leave) tick(_ *RaftEngine, region *core.RegionInfo) (newRegion *core.RegionInfo, isFinished bool) { isFinished = true var opts []core.RegionCreateOption for _, peer := range region.GetPeers() { diff --git a/tools/pd-tso-bench/main.go b/tools/pd-tso-bench/main.go index 236e78c7808..b4101bda270 100644 --- a/tools/pd-tso-bench/main.go +++ b/tools/pd-tso-bench/main.go @@ -382,10 +382,10 @@ func reqWorker(ctx context.Context, pdClients []pd.Client, clientIdx int, durCh i := 0 for ; i < maxRetryTime; i++ { + var ticker *time.Ticker if *maxTSOSendIntervalMilliseconds > 0 { sleepBeforeGetTS := time.Duration(rand.Intn(*maxTSOSendIntervalMilliseconds)) * time.Millisecond - ticker := time.NewTicker(sleepBeforeGetTS) - defer ticker.Stop() + ticker = time.NewTicker(sleepBeforeGetTS) select { case <-reqCtx.Done(): case <-ticker.C: @@ -394,9 +394,11 @@ func reqWorker(ctx context.Context, pdClients []pd.Client, clientIdx int, durCh } _, _, err = pdCli.GetLocalTS(reqCtx, *dcLocation) if errors.Cause(err) == context.Canceled { + ticker.Stop() return } if err == nil { + ticker.Stop() break } log.Error(fmt.Sprintf("%v", err)) From d081d955aca6725b0c3f5fe48cc74070d6d3ee98 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Wed, 3 Apr 2024 10:58:09 +0800 Subject: [PATCH 2/8] fix suite Signed-off-by: Ryan Leung --- pkg/schedule/splitter/region_splitter_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pkg/schedule/splitter/region_splitter_test.go b/pkg/schedule/splitter/region_splitter_test.go index 2dbadf6701c..99fd53df1e5 100644 --- a/pkg/schedule/splitter/region_splitter_test.go +++ b/pkg/schedule/splitter/region_splitter_test.go @@ -76,7 +76,7 @@ func (suite *regionSplitterTestSuite) SetupSuite() { suite.ctx, suite.cancel = context.WithCancel(context.Background()) } -func (suite *regionSplitterTestSuite) TearDownTest() { +func (suite *regionSplitterTestSuite) TearDownSuite() { suite.cancel() } From 07f5c949723c603ea26604c853d71234d8f80565 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Sun, 7 Apr 2024 10:27:27 +0800 Subject: [PATCH 3/8] remove revive toml Signed-off-by: Ryan Leung --- revive.toml | 49 ------------------------------------------------- 1 file changed, 49 deletions(-) delete mode 100644 revive.toml diff --git a/revive.toml b/revive.toml deleted file mode 100644 index a7ec9c09172..00000000000 --- a/revive.toml +++ /dev/null @@ -1,49 +0,0 @@ -ignoreGeneratedHeader = false -severity = "error" -confidence = 0.8 -errorCode = 1 -warningCode = 0 - -[rule.blank-imports] -[rule.context-as-argument] -[rule.dot-imports] -[rule.error-return] -[rule.error-strings] -[rule.error-naming] -[rule.exported] -[rule.if-return] -[rule.var-naming] -[rule.package-comments] -[rule.range] -[rule.receiver-naming] -[rule.indent-error-flow] -[rule.empty-block] -[rule.superfluous-else] -[rule.modifies-parameter] -[rule.confusing-naming] -[rule.confusing-results] - -# It generates unnecessary reports -#[rule.flag-parameter] - -# Currently this makes too much noise, but should add it in -# and perhaps ignore it in a few files -#[rule.unused-parameter] -# severity = "warning" -#[rule.deep-exit] -# severity = "warning" - - -# Already checked by megacheck -# [rule.unreachable-code] - -# Adding these will slow down the linter -# They are already provided by megacheck -# [rule.unexported-return] -# [rule.time-naming] -# [rule.errorf] - -# Adding these will slow down the linter -# Not sure if they are already provided by megacheck -# [rule.var-declaration] -# [rule.context-keys-type] From 5748dca4d467ff8b31187d22dc147af8bf81b1d4 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Sun, 7 Apr 2024 11:53:18 +0800 Subject: [PATCH 4/8] fix more Signed-off-by: Ryan Leung --- client/resource_group/controller/limiter.go | 1 - pkg/cgroup/cgroup.go | 5 -- pkg/cgroup/cgroup_memory.go | 1 - pkg/schedule/operator/operator.go | 1 - pkg/schedule/operator/operator_test.go | 51 +++++++++---------- .../placement/region_rule_cache_test.go | 3 +- plugin/scheduler_example/evict_leader.go | 2 - server/api/region_test.go | 18 ++++--- server/cluster/cluster_test.go | 44 ++++++++-------- server/grpc_service.go | 7 ++- server/server.go | 1 - tests/integrations/client/gc_client_test.go | 2 +- .../mcs/discovery/register_test.go | 8 ++- .../mcs/keyspace/tso_keyspace_group_test.go | 32 ++++++++++-- .../resourcemanager/resource_manager_test.go | 1 - tools/pd-api-bench/cases/cases.go | 15 +++--- tools/pd-api-bench/main.go | 1 - tools/pd-simulator/main.go | 7 ++- 18 files changed, 108 insertions(+), 92 deletions(-) diff --git a/client/resource_group/controller/limiter.go b/client/resource_group/controller/limiter.go index 9b343350d75..230ad46ecf1 100644 --- a/client/resource_group/controller/limiter.go +++ b/client/resource_group/controller/limiter.go @@ -218,7 +218,6 @@ func (lim *Limiter) Reserve(ctx context.Context, waitDuration time.Duration, now } // SetupNotificationThreshold enables the notification at the given threshold. 
-// FIXME: is it expected?
 func (lim *Limiter) SetupNotificationThreshold(threshold float64) {
 	lim.mu.Lock()
 	defer lim.mu.Unlock()
diff --git a/pkg/cgroup/cgroup.go b/pkg/cgroup/cgroup.go
index e45dcbc0929..133bd3158c8 100644
--- a/pkg/cgroup/cgroup.go
+++ b/pkg/cgroup/cgroup.go
@@ -143,7 +143,6 @@ func combineErrors(err1, err2 error) error {
 
 func readFile(filepath string) (res []byte, err error) {
 	var f *os.File
-	//nolint:gosec
 	f, err = os.Open(filepath)
 	if err != nil {
 		return nil, err
@@ -185,7 +184,6 @@ func controllerMatch(field string, controller string) bool {
 // The controller is defined via either type `memory` for cgroup v1 or via empty type for cgroup v2,
 // where the type is the second field in /proc/[pid]/cgroup file
 func detectControlPath(cgroupFilePath string, controller string) (string, error) {
-	//nolint:gosec
 	cgroup, err := os.Open(cgroupFilePath)
 	if err != nil {
 		return "", errors.Wrapf(err, "failed to read %s cgroup from cgroups file: %s", controller, cgroupFilePath)
@@ -229,7 +227,6 @@ func detectControlPath(cgroupFilePath string, controller string) (string, error)
 // See http://man7.org/linux/man-pages/man5/proc.5.html for `mountinfo` format.
 func getCgroupDetails(mountInfoPath string, cRoot string, controller string) (mount []string, version []int, err error) {
-	//nolint:gosec
 	info, err := os.Open(mountInfoPath)
 	if err != nil {
 		return nil, nil, errors.Wrapf(err, "failed to read mounts info from file: %s", mountInfoPath)
@@ -411,7 +408,6 @@ func detectCPUQuotaInV2(cRoot string) (period, quota int64, err error) {
 func detectCPUUsageInV2(cRoot string) (stime, utime uint64, err error) {
 	statFilePath := filepath.Join(cRoot, cgroupV2CPUStat)
 	var stat *os.File
-	//nolint:gosec
 	stat, err = os.Open(statFilePath)
 	if err != nil {
 		return 0, 0, errors.Wrapf(err, "can't read cpu usage from cgroup v2 at %s", statFilePath)
@@ -444,7 +440,6 @@ func detectCPUUsageInV2(cRoot string) (stime, utime uint64, err error) {
 func readInt64Value(root, filename string, cgVersion int) (value uint64, err error) {
 	filePath := filepath.Join(root, filename)
-	//nolint:gosec
 	file, err := os.Open(filePath)
 	if err != nil {
 		return 0, errors.Wrapf(err, "can't read %s from cgroup v%d", filename, cgVersion)
diff --git a/pkg/cgroup/cgroup_memory.go b/pkg/cgroup/cgroup_memory.go
index fb8e8f212dc..2a6d581023e 100644
--- a/pkg/cgroup/cgroup_memory.go
+++ b/pkg/cgroup/cgroup_memory.go
@@ -177,7 +177,6 @@ func detectMemInactiveFileUsageInV2(root string) (uint64, error) {
 
 func detectMemStatValue(cRoot, filename, key string, cgVersion int) (value uint64, err error) {
 	statFilePath := filepath.Join(cRoot, filename)
-	//nolint:gosec
 	stat, err := os.Open(statFilePath)
 	if err != nil {
 		return 0, errors.Wrapf(err, "can't read file %s from cgroup v%d", filename, cgVersion)
diff --git a/pkg/schedule/operator/operator.go b/pkg/schedule/operator/operator.go
index 8c0986218bc..b87a050969f 100644
--- a/pkg/schedule/operator/operator.go
+++ b/pkg/schedule/operator/operator.go
@@ -531,7 +531,6 @@ const (
 	mockBrief = "test"
 )
 
-// nolint
 // NewTestOperator creates a test operator, only used for unit test.
 func NewTestOperator(regionID uint64, regionEpoch *metapb.RegionEpoch, kind OpKind, steps ...OpStep) *Operator {
 	// OpSteps can not be empty for test.
diff --git a/pkg/schedule/operator/operator_test.go b/pkg/schedule/operator/operator_test.go
index 809430caeb2..693f5c17475 100644
--- a/pkg/schedule/operator/operator_test.go
+++ b/pkg/schedule/operator/operator_test.go
@@ -96,11 +96,6 @@ func (suite *operatorTestSuite) TestOperatorStep() {
 	re.True(RemovePeer{FromStore: 3}.IsFinish(region))
 }
 
-// nolint
-func newTestOperator(regionID uint64, kind OpKind, steps ...OpStep) *Operator {
-	return NewTestOperator(regionID, &metapb.RegionEpoch{}, kind, steps...)
-}
-
 func checkSteps(re *require.Assertions, op *Operator, steps []OpStep) {
 	re.Len(steps, op.Len())
 	for i := range steps {
@@ -117,7 +112,7 @@ func (suite *operatorTestSuite) TestOperator() {
 		TransferLeader{FromStore: 3, ToStore: 1},
 		RemovePeer{FromStore: 3},
 	}
-	op := newTestOperator(1, OpAdmin|OpLeader|OpRegion, steps...)
+	op := NewTestOperator(1, &metapb.RegionEpoch{}, OpAdmin|OpLeader|OpRegion, steps...)
 	re.Equal(constant.Urgent, op.GetPriorityLevel())
 	checkSteps(re, op, steps)
 	op.Start()
@@ -133,7 +128,7 @@ func (suite *operatorTestSuite) TestOperator() {
 		TransferLeader{FromStore: 2, ToStore: 1},
 		RemovePeer{FromStore: 2},
 	}
-	op = newTestOperator(1, OpLeader|OpRegion, steps...)
+	op = NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 	re.Equal(constant.Medium, op.GetPriorityLevel())
 	checkSteps(re, op, steps)
 	op.Start()
@@ -150,7 +145,7 @@ func (suite *operatorTestSuite) TestOperator() {
 
 	// check short timeout for transfer leader only operators.
 	steps = []OpStep{TransferLeader{FromStore: 2, ToStore: 1}}
-	op = newTestOperator(1, OpLeader, steps...)
+	op = NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader, steps...)
 	op.Start()
 	re.False(op.CheckTimeout())
 	op.SetStatusReachTime(STARTED, op.GetStartTime().Add(-FastStepWaitTime-time.Second))
@@ -310,7 +305,7 @@ func (suite *operatorTestSuite) TestCheckSuccess() {
 			TransferLeader{FromStore: 2, ToStore: 1},
 			RemovePeer{FromStore: 2},
 		}
-		op := newTestOperator(1, OpLeader|OpRegion, steps...)
+		op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 		re.Equal(CREATED, op.Status())
 		re.False(op.CheckSuccess())
 		re.True(op.Start())
@@ -325,7 +320,7 @@ func (suite *operatorTestSuite) TestCheckSuccess() {
 			TransferLeader{FromStore: 2, ToStore: 1},
 			RemovePeer{FromStore: 2},
 		}
-		op := newTestOperator(1, OpLeader|OpRegion, steps...)
+		op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 		op.currentStep = int32(len(op.steps))
 		re.Equal(CREATED, op.Status())
 		re.False(op.CheckSuccess())
@@ -343,7 +338,7 @@ func (suite *operatorTestSuite) TestCheckTimeout() {
 			TransferLeader{FromStore: 2, ToStore: 1},
 			RemovePeer{FromStore: 2},
 		}
-		op := newTestOperator(1, OpLeader|OpRegion, steps...)
+		op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 		re.Equal(CREATED, op.Status())
 		re.True(op.Start())
 		op.currentStep = int32(len(op.steps))
@@ -356,7 +351,7 @@ func (suite *operatorTestSuite) TestCheckTimeout() {
 			TransferLeader{FromStore: 2, ToStore: 1},
 			RemovePeer{FromStore: 2},
 		}
-		op := newTestOperator(1, OpLeader|OpRegion, steps...)
+		op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 		re.Equal(CREATED, op.Status())
 		re.True(op.Start())
 		op.currentStep = int32(len(op.steps))
@@ -373,7 +368,7 @@ func (suite *operatorTestSuite) TestStart() {
 		TransferLeader{FromStore: 2, ToStore: 1},
 		RemovePeer{FromStore: 2},
 	}
-	op := newTestOperator(1, OpLeader|OpRegion, steps...)
+	op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 	re.Equal(0, op.GetStartTime().Nanosecond())
 	re.Equal(CREATED, op.Status())
 	re.True(op.Start())
@@ -388,7 +383,7 @@ func (suite *operatorTestSuite) TestCheckExpired() {
 		TransferLeader{FromStore: 2, ToStore: 1},
 		RemovePeer{FromStore: 2},
 	}
-	op := newTestOperator(1, OpLeader|OpRegion, steps...)
+	op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 	re.False(op.CheckExpired())
 	re.Equal(CREATED, op.Status())
 	op.SetStatusReachTime(CREATED, time.Now().Add(-OperatorExpireTime))
@@ -405,7 +400,7 @@ func (suite *operatorTestSuite) TestCheck() {
 			TransferLeader{FromStore: 2, ToStore: 1},
 			RemovePeer{FromStore: 2},
 		}
-		op := newTestOperator(2, OpLeader|OpRegion, steps...)
+		op := NewTestOperator(2, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 		re.True(op.Start())
 		re.NotNil(op.Check(region))
 
@@ -422,7 +417,7 @@ func (suite *operatorTestSuite) TestCheck() {
 			TransferLeader{FromStore: 2, ToStore: 1},
 			RemovePeer{FromStore: 2},
 		}
-		op := newTestOperator(1, OpLeader|OpRegion, steps...)
+		op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 		re.True(op.Start())
 		re.NotNil(op.Check(region))
 		re.Equal(STARTED, op.Status())
@@ -437,7 +432,7 @@ func (suite *operatorTestSuite) TestCheck() {
 			TransferLeader{FromStore: 2, ToStore: 1},
 			RemovePeer{FromStore: 2},
 		}
-		op := newTestOperator(1, OpLeader|OpRegion, steps...)
+		op := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 		re.True(op.Start())
 		re.NotNil(op.Check(region))
 		re.Equal(STARTED, op.Status())
@@ -455,28 +450,28 @@ func (suite *operatorTestSuite) TestSchedulerKind() {
 		expect OpKind
 	}{
 		{
-			op:     newTestOperator(1, OpAdmin|OpMerge|OpRegion),
+			op:     NewTestOperator(1, &metapb.RegionEpoch{}, OpAdmin|OpMerge|OpRegion),
 			expect: OpAdmin,
 		}, {
-			op:     newTestOperator(1, OpMerge|OpLeader|OpRegion),
+			op:     NewTestOperator(1, &metapb.RegionEpoch{}, OpMerge|OpLeader|OpRegion),
 			expect: OpMerge,
 		}, {
-			op:     newTestOperator(1, OpReplica|OpRegion),
+			op:     NewTestOperator(1, &metapb.RegionEpoch{}, OpReplica|OpRegion),
 			expect: OpReplica,
 		}, {
-			op:     newTestOperator(1, OpSplit|OpRegion),
+			op:     NewTestOperator(1, &metapb.RegionEpoch{}, OpSplit|OpRegion),
 			expect: OpSplit,
 		}, {
-			op:     newTestOperator(1, OpRange|OpRegion),
+			op:     NewTestOperator(1, &metapb.RegionEpoch{}, OpRange|OpRegion),
 			expect: OpRange,
 		}, {
-			op:     newTestOperator(1, OpHotRegion|OpLeader|OpRegion),
+			op:     NewTestOperator(1, &metapb.RegionEpoch{}, OpHotRegion|OpLeader|OpRegion),
 			expect: OpHotRegion,
 		}, {
-			op:     newTestOperator(1, OpRegion|OpLeader),
+			op:     NewTestOperator(1, &metapb.RegionEpoch{}, OpRegion|OpLeader),
 			expect: OpRegion,
 		}, {
-			op:     newTestOperator(1, OpLeader),
+			op:     NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader),
 			expect: OpLeader,
 		},
 	}
@@ -535,7 +530,7 @@ func (suite *operatorTestSuite) TestOpStepTimeout() {
 
 func (suite *operatorTestSuite) TestRecord() {
 	re := suite.Require()
-	operator := newTestOperator(1, OpLeader, AddLearner{ToStore: 1, PeerID: 1}, RemovePeer{FromStore: 1, PeerID: 1})
+	operator := NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader, AddLearner{ToStore: 1, PeerID: 1}, RemovePeer{FromStore: 1, PeerID: 1})
 	now := time.Now()
 	time.Sleep(time.Second)
 	ob := operator.Record(now)
@@ -549,7 +544,7 @@ func (suite *operatorTestSuite) TestToJSONObject() {
 		TransferLeader{FromStore: 3, ToStore: 1},
 		RemovePeer{FromStore: 3},
 	}
-	op := newTestOperator(101, OpLeader|OpRegion, steps...)
+	op := NewTestOperator(101, &metapb.RegionEpoch{}, OpLeader|OpRegion, steps...)
 	op.Start()
 	obj := op.ToJSONObject()
 	suite.Equal("test", obj.Desc)
@@ -568,7 +563,7 @@ func (suite *operatorTestSuite) TestToJSONObject() {
 
 	// Test TIMEOUT status.
 	steps = []OpStep{TransferLeader{FromStore: 2, ToStore: 1}}
-	op = newTestOperator(1, OpLeader, steps...)
+	op = NewTestOperator(1, &metapb.RegionEpoch{}, OpLeader, steps...)
 	op.Start()
 	op.SetStatusReachTime(STARTED, op.GetStartTime().Add(-FastStepWaitTime-time.Second))
 	suite.True(op.CheckTimeout())
diff --git a/pkg/schedule/placement/region_rule_cache_test.go b/pkg/schedule/placement/region_rule_cache_test.go
index 835203bed26..e951ea10cc5 100644
--- a/pkg/schedule/placement/region_rule_cache_test.go
+++ b/pkg/schedule/placement/region_rule_cache_test.go
@@ -226,7 +226,7 @@ func (manager *RegionRuleFitCacheManager) mockRegionRuleFitCache(region *core.Re
 	}
 }
 
-// nolint
+// nolint:unparam
 func mockStores(num int) []*core.StoreInfo {
 	stores := make([]*core.StoreInfo, 0, num)
 	now := time.Now()
@@ -237,7 +237,6 @@ func mockStores(num int) []*core.StoreInfo {
 	return stores
 }
 
-// nolint
 func mockStoresNoHeartbeat(num int) []*core.StoreInfo {
 	stores := make([]*core.StoreInfo, 0, num)
 	for i := 1; i <= num; i++ {
diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go
index 7dff7bce41b..f761b3812c5 100644
--- a/plugin/scheduler_example/evict_leader.go
+++ b/plugin/scheduler_example/evict_leader.go
@@ -79,13 +79,11 @@ func init() {
 }
 
 // SchedulerType returns the type of the scheduler
-// nolint
 func SchedulerType() string {
 	return EvictLeaderType
 }
 
 // SchedulerArgs returns the args for the scheduler
-// nolint
 func SchedulerArgs() []string {
 	args := []string{"1"}
 	return args
diff --git a/server/api/region_test.go b/server/api/region_test.go
index 5fd5db0f82e..0e5dcd97678 100644
--- a/server/api/region_test.go
+++ b/server/api/region_test.go
@@ -345,17 +345,23 @@ func TestRegionsWithKillRequest(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
 	re.NoError(err)
-	respCh := make(chan *http.Response)
+	doneCh := make(chan struct{})
 	go func() {
-		resp, err := testDialClient.Do(req) // nolint:bodyclose
+		resp, err := testDialClient.Do(req)
+		defer func() {
+			if resp != nil {
+				resp.Body.Close()
+			}
+		}()
 		re.Error(err)
 		re.Contains(err.Error(), "context canceled")
-		respCh <- resp
+		re.Nil(resp)
+		doneCh <- struct{}{}
 	}()
 	time.Sleep(100 * time.Millisecond) // wait for the request to be sent
-	cancel() // close the request
-	resp := <-respCh
-	re.Nil(resp)
+	cancel()
+	<-doneCh
+	close(doneCh)
 }
 
 type getRegionTestSuite struct {
diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go
index 698a0eb390a..d3da21ab3dd 100644
--- a/server/cluster/cluster_test.go
+++ b/server/cluster/cluster_test.go
@@ -2288,10 +2288,6 @@ func checkStaleRegion(origin *metapb.Region, region *metapb.Region) error {
 	return nil
 }
 
-func newTestOperator(regionID uint64, regionEpoch *metapb.RegionEpoch, kind operator.OpKind, steps ...operator.OpStep) *operator.Operator {
-	return operator.NewTestOperator(regionID, regionEpoch, kind, steps...)
-}
-
 func (c *testCluster) AllocPeer(storeID uint64) (*metapb.Peer, error) {
 	id, err := c.AllocID()
 	if err != nil {
@@ -2404,19 +2400,19 @@ func TestBasic(t *testing.T) {
 
 	re.NoError(tc.addLeaderRegion(1, 1))
 
-	op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
+	op1 := operator.NewTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
 	oc.AddWaitingOperator(op1)
 	re.Equal(uint64(1), oc.OperatorCount(operator.OpLeader))
 	re.Equal(op1.RegionID(), oc.GetOperator(1).RegionID())
 
 	// Region 1 already has an operator, cannot add another one.
-	op2 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
+	op2 := operator.NewTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
 	oc.AddWaitingOperator(op2)
 	re.Equal(uint64(0), oc.OperatorCount(operator.OpRegion))
 
 	// Remove the operator manually, then we can add a new operator.
 	re.True(oc.RemoveOperator(op1))
-	op3 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
+	op3 := operator.NewTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
 	oc.AddWaitingOperator(op3)
 	re.Equal(uint64(1), oc.OperatorCount(operator.OpRegion))
 	re.Equal(op3.RegionID(), oc.GetOperator(1).RegionID())
@@ -2709,7 +2705,7 @@ func TestCheckerIsBusy(t *testing.T) {
 			re.NoError(tc.addLeaderRegion(regionID, 1))
 			switch operatorKind {
 			case operator.OpReplica:
-				op := newTestOperator(regionID, tc.GetRegion(regionID).GetRegionEpoch(), operatorKind)
+				op := operator.NewTestOperator(regionID, tc.GetRegion(regionID).GetRegionEpoch(), operatorKind)
 				re.Equal(1, co.GetOperatorController().AddWaitingOperator(op))
 			case operator.OpRegion | operator.OpMerge:
 				if regionID%2 == 1 {
@@ -3375,10 +3371,10 @@ func TestOperatorCount(t *testing.T) {
 	re.NoError(tc.addLeaderRegion(1, 1))
 	re.NoError(tc.addLeaderRegion(2, 2))
 	{
-		op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
+		op1 := operator.NewTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
 		oc.AddWaitingOperator(op1)
 		re.Equal(uint64(1), oc.OperatorCount(operator.OpLeader)) // 1:leader
-		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
+		op2 := operator.NewTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
 		oc.AddWaitingOperator(op2)
 		re.Equal(uint64(2), oc.OperatorCount(operator.OpLeader)) // 1:leader, 2:leader
 		re.True(oc.RemoveOperator(op1))
@@ -3386,11 +3382,11 @@ func TestOperatorCount(t *testing.T) {
 	}
 
 	{
-		op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
+		op1 := operator.NewTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
 		oc.AddWaitingOperator(op1)
 		re.Equal(uint64(1), oc.OperatorCount(operator.OpRegion)) // 1:region 2:leader
 		re.Equal(uint64(1), oc.OperatorCount(operator.OpLeader))
-		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion)
+		op2 := operator.NewTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion)
 		op2.SetPriorityLevel(constant.High)
 		oc.AddWaitingOperator(op2)
 		re.Equal(uint64(2), oc.OperatorCount(operator.OpRegion)) // 1:region 2:region
@@ -3471,12 +3467,12 @@ func TestStoreOverloadedWithReplace(t *testing.T) {
 	tc.putRegion(region)
 	region = tc.GetRegion(2).Clone(core.SetApproximateSize(60))
 	tc.putRegion(region)
-	op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 1})
+	op1 := operator.NewTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 1})
 	re.True(oc.AddOperator(op1))
-	op2 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 2, PeerID: 2})
+	op2 := operator.NewTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 2, PeerID: 2})
 	op2.SetPriorityLevel(constant.High)
 	re.True(oc.AddOperator(op2))
-	op3 := newTestOperator(1, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 3})
+	op3 := operator.NewTestOperator(1, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 3})
 	re.False(oc.AddOperator(op3))
 	ops, _ := lb.Schedule(tc, false /* dryRun */)
 	re.Empty(ops)
@@ -3569,11 +3565,11 @@ func TestController(t *testing.T) {
 	// count = 0
 	{
 		re.True(sc.AllowSchedule(false))
-		op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
+		op1 := operator.NewTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
 		re.Equal(1, oc.AddWaitingOperator(op1))
 		// count = 1
 		re.True(sc.AllowSchedule(false))
-		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
+		op2 := operator.NewTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
 		re.Equal(1, oc.AddWaitingOperator(op2))
 		// count = 2
 		re.False(sc.AllowSchedule(false))
@@ -3582,10 +3578,10 @@ func TestController(t *testing.T) {
 		re.True(sc.AllowSchedule(false))
 	}
 
-	op11 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
+	op11 := operator.NewTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
 	// add a PriorityKind operator will remove old operator
 	{
-		op3 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpHotRegion)
+		op3 := operator.NewTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpHotRegion)
 		op3.SetPriorityLevel(constant.High)
 		re.Equal(1, oc.AddWaitingOperator(op11))
 		re.False(sc.AllowSchedule(false))
@@ -3596,10 +3592,10 @@ func TestController(t *testing.T) {
 
 	// add a admin operator will remove old operator
 	{
-		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
+		op2 := operator.NewTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
 		re.Equal(1, oc.AddWaitingOperator(op2))
 		re.False(sc.AllowSchedule(false))
-		op4 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpAdmin)
+		op4 := operator.NewTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpAdmin)
 		op4.SetPriorityLevel(constant.High)
 		re.Equal(1, oc.AddWaitingOperator(op4))
 		re.True(sc.AllowSchedule(false))
@@ -3608,7 +3604,7 @@ func TestController(t *testing.T) {
 
 	// test wrong region id.
 	{
-		op5 := newTestOperator(3, &metapb.RegionEpoch{}, operator.OpHotRegion)
+		op5 := operator.NewTestOperator(3, &metapb.RegionEpoch{}, operator.OpHotRegion)
 		re.Equal(0, oc.AddWaitingOperator(op5))
 	}
 
@@ -3619,12 +3615,12 @@ func TestController(t *testing.T) {
 		ConfVer: tc.GetRegion(1).GetRegionEpoch().GetConfVer(),
 	}
 	{
-		op6 := newTestOperator(1, epoch, operator.OpLeader)
+		op6 := operator.NewTestOperator(1, epoch, operator.OpLeader)
 		re.Equal(0, oc.AddWaitingOperator(op6))
 	}
 	epoch.Version--
 	{
-		op6 := newTestOperator(1, epoch, operator.OpLeader)
+		op6 := operator.NewTestOperator(1, epoch, operator.OpLeader)
 		re.Equal(1, oc.AddWaitingOperator(op6))
 		re.True(oc.RemoveOperator(op6))
 	}
diff --git a/server/grpc_service.go b/server/grpc_service.go
index 7b1b4a2c5fc..2b3ee232686 100644
--- a/server/grpc_service.go
+++ b/server/grpc_service.go
@@ -1928,9 +1928,9 @@ func (s *GrpcServer) ScatterRegion(ctx context.Context, request *pdpb.ScatterReg
 		cli := forwardCli.getClient()
 		if cli != nil {
 			var regionsID []uint64
-			// nolint
+			// nolint:staticcheck
 			if request.GetRegionId() != 0 {
-				// nolint
+				// nolint:staticcheck
 				regionsID = []uint64{request.GetRegionId()}
 			} else {
 				regionsID = request.GetRegionsId()
@@ -1986,11 +1986,10 @@ func (s *GrpcServer) ScatterRegion(ctx context.Context, request *pdpb.ScatterReg
 		}, nil
 	}
 	// TODO: Deprecate it use `request.GetRegionsID`.
-	//nolint
+	// nolint:staticcheck
 	region := rc.GetRegion(request.GetRegionId())
 	if region == nil {
 		if request.GetRegion() == nil {
-			//nolint
 			return &pdpb.ScatterRegionResponse{
 				Header: s.wrapErrorToHeader(pdpb.ErrorType_REGION_NOT_FOUND,
 					"region %d not found"),
diff --git a/server/server.go b/server/server.go
index ed0b03d9653..8d7b83cfe4a 100644
--- a/server/server.go
+++ b/server/server.go
@@ -127,7 +127,6 @@ var (
 )
 
 // Server is the pd server. It implements bs.Server
-// nolint
 type Server struct {
 	diagnosticspb.DiagnosticsServer
diff --git a/tests/integrations/client/gc_client_test.go b/tests/integrations/client/gc_client_test.go
index 9232b134f20..0913579f47e 100644
--- a/tests/integrations/client/gc_client_test.go
+++ b/tests/integrations/client/gc_client_test.go
@@ -135,7 +135,7 @@ func (suite *gcClientTestSuite) TestClientWatchWithRevision() {
 	suite.testClientWatchWithRevision(true)
 }
 
-// nolint
+// nolint:revive
 func (suite *gcClientTestSuite) testClientWatchWithRevision(fromNewRevision bool) {
 	re := suite.Require()
 	testKeyspaceID := uint32(100)
diff --git a/tests/integrations/mcs/discovery/register_test.go b/tests/integrations/mcs/discovery/register_test.go
index e8f574ff8de..69d53463818 100644
--- a/tests/integrations/mcs/discovery/register_test.go
+++ b/tests/integrations/mcs/discovery/register_test.go
@@ -124,9 +124,15 @@ func (suite *serverRegisterTestSuite) checkServerPrimaryChange(serviceName strin
 	re.Empty(primary)
 
 	serverMap := make(map[string]bs.Server)
+	var cleanups []func()
+	defer func() {
+		for _, cleanup := range cleanups {
+			cleanup()
+		}
+	}()
 	for i := 0; i < serverNum; i++ {
 		s, cleanup := suite.addServer(serviceName)
-		defer cleanup() // nolint
+		cleanups = append(cleanups, cleanup)
 		serverMap[s.GetAddr()] = s
 	}
diff --git a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go
index eec0909df61..160eea167d6 100644
--- a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go
+++ b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go
@@ -89,9 +89,15 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() {
 	re := suite.Require()
 	// add three nodes.
 	nodes := make(map[string]bs.Server)
+	var cleanups []func()
+	defer func() {
+		for _, cleanup := range cleanups {
+			cleanup()
+		}
+	}()
 	for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount+1; i++ {
 		s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc())
-		defer cleanup() // nolint
+		cleanups = append(cleanups, cleanup)
 		nodes[s.GetAddr()] = s
 	}
 	tests.WaitForPrimaryServing(re, nodes)
@@ -139,9 +145,15 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() {
 func (suite *keyspaceGroupTestSuite) TestAllocReplica() {
 	re := suite.Require()
 	nodes := make(map[string]bs.Server)
+	var cleanups []func()
+	defer func() {
+		for _, cleanup := range cleanups {
+			cleanup()
+		}
+	}()
 	for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ {
 		s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc())
-		defer cleanup() // nolint
+		cleanups = append(cleanups, cleanup)
 		nodes[s.GetAddr()] = s
 	}
 	tests.WaitForPrimaryServing(re, nodes)
@@ -233,9 +245,15 @@ func (suite *keyspaceGroupTestSuite) TestSetNodes() {
 	re := suite.Require()
 	nodes := make(map[string]bs.Server)
 	nodesList := []string{}
+	var cleanups []func()
+	defer func() {
+		for _, cleanup := range cleanups {
+			cleanup()
+		}
+	}()
 	for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ {
 		s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc())
-		defer cleanup() // nolint
+		cleanups = append(cleanups, cleanup)
 		nodes[s.GetAddr()] = s
 		nodesList = append(nodesList, s.GetAddr())
 	}
@@ -294,9 +312,15 @@ func (suite *keyspaceGroupTestSuite) TestSetNodes() {
 func (suite *keyspaceGroupTestSuite) TestDefaultKeyspaceGroup() {
 	re := suite.Require()
 	nodes := make(map[string]bs.Server)
+	var cleanups []func()
+	defer func() {
+		for _, cleanup := range cleanups {
+			cleanup()
+		}
+	}()
 	for i := 0; i < utils.DefaultKeyspaceGroupReplicaCount; i++ {
 		s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc())
-		defer cleanup() // nolint
+		cleanups = append(cleanups, cleanup)
 		nodes[s.GetAddr()] = s
 	}
 	tests.WaitForPrimaryServing(re, nodes)
diff --git a/tests/integrations/mcs/resourcemanager/resource_manager_test.go b/tests/integrations/mcs/resourcemanager/resource_manager_test.go
index f8cf92dddac..17673213a97 100644
--- a/tests/integrations/mcs/resourcemanager/resource_manager_test.go
+++ b/tests/integrations/mcs/resourcemanager/resource_manager_test.go
@@ -706,7 +706,6 @@ func (suite *resourceManagerClientTestSuite) TestResourcePenalty() {
 	c.Stop()
 }
 
-// nolint:gosec
 func (suite *resourceManagerClientTestSuite) TestAcquireTokenBucket() {
 	re := suite.Require()
 	cli := suite.client
diff --git a/tools/pd-api-bench/cases/cases.go b/tools/pd-api-bench/cases/cases.go
index 787132a5816..72986df5ed8 100644
--- a/tools/pd-api-bench/cases/cases.go
+++ b/tools/pd-api-bench/cases/cases.go
@@ -37,6 +37,8 @@ var (
 	storesID []uint64
 )
 
+const defaultKeyLen = 56
+
 // InitCluster initializes the cluster.
 func InitCluster(ctx context.Context, cli pd.Client, httpCli pdHttp.Client) error {
 	statsResp, err := httpCli.GetRegionStatusByKeyRange(ctx, pdHttp.NewKeyRange([]byte(""), []byte("")), false)
@@ -221,7 +223,7 @@ func (c *regionsStats) Do(ctx context.Context, cli pdHttp.Client) error {
 	startID := c.regionSample*random*4 + 1
 	endID := c.regionSample*(random+1)*4 + 1
 	regionStats, err := cli.GetRegionStatusByKeyRange(ctx,
-		pdHttp.NewKeyRange(generateKeyForSimulator(startID, 56), generateKeyForSimulator(endID, 56)), false)
+		pdHttp.NewKeyRange(generateKeyForSimulator(startID), generateKeyForSimulator(endID)), false)
 	if Debug {
 		log.Info("do HTTP case", zap.String("case", c.name), zap.Any("region-stats", regionStats), zap.Error(err))
 	}
@@ -297,7 +299,7 @@ func newGetRegion() func() GRPCCase {
 
 func (*getRegion) Unary(ctx context.Context, cli pd.Client) error {
 	id := rand.Intn(totalRegion)*4 + 1
-	_, err := cli.GetRegion(ctx, generateKeyForSimulator(id, 56))
+	_, err := cli.GetRegion(ctx, generateKeyForSimulator(id))
 	if err != nil {
 		return err
 	}
@@ -321,7 +323,7 @@ func newGetRegionEnableFollower() func() GRPCCase {
 
 func (*getRegionEnableFollower) Unary(ctx context.Context, cli pd.Client) error {
 	id := rand.Intn(totalRegion)*4 + 1
-	_, err := cli.GetRegion(ctx, generateKeyForSimulator(id, 56), pd.WithAllowFollowerHandle())
+	_, err := cli.GetRegion(ctx, generateKeyForSimulator(id), pd.WithAllowFollowerHandle())
 	if err != nil {
 		return err
 	}
@@ -350,7 +352,7 @@ func (c *scanRegions) Unary(ctx context.Context, cli pd.Client) error {
 	random := rand.Intn(upperBound)
 	startID := c.regionSample*random*4 + 1
 	endID := c.regionSample*(random+1)*4 + 1
-	_, err := cli.ScanRegions(ctx, generateKeyForSimulator(startID, 56), generateKeyForSimulator(endID, 56), c.regionSample)
+	_, err := cli.ScanRegions(ctx, generateKeyForSimulator(startID), generateKeyForSimulator(endID), c.regionSample)
 	if err != nil {
 		return err
 	}
@@ -427,9 +429,8 @@ func (*getStores) Unary(ctx context.Context, cli pd.Client) error {
 	return nil
 }
 
-// nolint
-func generateKeyForSimulator(id int, keyLen int) []byte {
-	k := make([]byte, keyLen)
+func generateKeyForSimulator(id int) []byte {
+	k := make([]byte, defaultKeyLen)
 	copy(k, fmt.Sprintf("%010d", id))
 	return k
 }
diff --git a/tools/pd-api-bench/main.go b/tools/pd-api-bench/main.go
index dff40555fd6..f9feeeea580 100644
--- a/tools/pd-api-bench/main.go
+++ b/tools/pd-api-bench/main.go
@@ -341,7 +341,6 @@ func runHTTPServer(cfg *config.Config, co *cases.Coordinator) {
 		}
 		c.IndentedJSON(http.StatusOK, cfg)
 	})
-	// nolint
 	engine.Run(cfg.StatusAddr)
 }
diff --git a/tools/pd-simulator/main.go b/tools/pd-simulator/main.go
index 5d781757b39..73f4a0bba12 100644
--- a/tools/pd-simulator/main.go
+++ b/tools/pd-simulator/main.go
@@ -128,8 +128,11 @@ func runHTTPServer() {
 	http.Handle("/pprof/allocs", pprof.Handler("allocs"))
 	http.Handle("/pprof/block", pprof.Handler("block"))
 	http.Handle("/pprof/goroutine", pprof.Handler("goroutine"))
-	// nolint
-	http.ListenAndServe(*statusAddress, nil)
+	server := &http.Server{
+		Addr:              *statusAddress,
+		ReadHeaderTimeout: 3 * time.Second,
+	}
+	server.ListenAndServe()
 }
 
 // NewSingleServer creates a pd server for simulator.

From b0ca2ebfd1bce5744aca02cfa110548e7dbc3a8a Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Sun, 7 Apr 2024 12:17:20 +0800
Subject: [PATCH 5/8] fix rc config

Signed-off-by: Ryan Leung
---
 pkg/mcs/resourcemanager/server/config.go | 43 +++++++++++++-----------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/pkg/mcs/resourcemanager/server/config.go b/pkg/mcs/resourcemanager/server/config.go
index 3c7886daec4..a4f2734cb9f 100644
--- a/pkg/mcs/resourcemanager/server/config.go
+++ b/pkg/mcs/resourcemanager/server/config.go
@@ -108,15 +108,18 @@ type ControllerConfig struct {
 }
 
 // Adjust adjusts the configuration and initializes it with the default value if necessary.
-// FIXME: is it expected?
-func (rmc *ControllerConfig) Adjust(_ *configutil.ConfigMetaData) {
+func (rmc *ControllerConfig) Adjust(meta *configutil.ConfigMetaData) {
 	if rmc == nil {
 		return
 	}
-	rmc.RequestUnit.Adjust()
+	rmc.RequestUnit.Adjust(meta)
 
-	configutil.AdjustDuration(&rmc.DegradedModeWaitDuration, defaultDegradedModeWaitDuration)
-	configutil.AdjustDuration(&rmc.LTBMaxWaitDuration, defaultMaxWaitDuration)
+	if !meta.IsDefined("degraded-mode-wait-duration") {
+		configutil.AdjustDuration(&rmc.DegradedModeWaitDuration, defaultDegradedModeWaitDuration)
+	}
+	if !meta.IsDefined("ltb-max-wait-duration") {
+		configutil.AdjustDuration(&rmc.LTBMaxWaitDuration, defaultMaxWaitDuration)
+	}
 	failpoint.Inject("enableDegradedMode", func() {
 		configutil.AdjustDuration(&rmc.DegradedModeWaitDuration, time.Second)
 	})
@@ -145,30 +148,30 @@ type RequestUnitConfig struct {
 }
 
 // Adjust adjusts the configuration and initializes it with the default value if necessary.
-func (ruc *RequestUnitConfig) Adjust() {
+func (ruc *RequestUnitConfig) Adjust(meta *configutil.ConfigMetaData) {
 	if ruc == nil {
 		return
 	}
-	if ruc.ReadBaseCost == 0 {
-		ruc.ReadBaseCost = defaultReadBaseCost
+	if !meta.IsDefined("read-base-cost") {
+		configutil.AdjustFloat64(&ruc.ReadBaseCost, defaultReadBaseCost)
 	}
-	if ruc.ReadPerBatchBaseCost == 0 {
-		ruc.ReadPerBatchBaseCost = defaultReadPerBatchBaseCost
+	if !meta.IsDefined("read-per-batch-base-cost") {
+		configutil.AdjustFloat64(&ruc.ReadPerBatchBaseCost, defaultReadPerBatchBaseCost)
 	}
-	if ruc.ReadCostPerByte == 0 {
-		ruc.ReadCostPerByte = defaultReadCostPerByte
+	if !meta.IsDefined("read-cost-per-byte") {
+		configutil.AdjustFloat64(&ruc.ReadCostPerByte, defaultReadCostPerByte)
	}
-	if ruc.WriteBaseCost == 0 {
-		ruc.WriteBaseCost = defaultWriteBaseCost
+	if !meta.IsDefined("write-base-cost") {
+		configutil.AdjustFloat64(&ruc.WriteBaseCost, defaultWriteBaseCost)
 	}
-	if ruc.WritePerBatchBaseCost == 0 {
-		ruc.WritePerBatchBaseCost = defaultWritePerBatchBaseCost
+	if !meta.IsDefined("write-per-batch-base-cost") {
+		configutil.AdjustFloat64(&ruc.WritePerBatchBaseCost, defaultWritePerBatchBaseCost)
 	}
-	if ruc.WriteCostPerByte == 0 {
-		ruc.WriteCostPerByte = defaultWriteCostPerByte
+	if !meta.IsDefined("write-cost-per-byte") {
+		configutil.AdjustFloat64(&ruc.WriteCostPerByte, defaultWriteCostPerByte)
 	}
-	if ruc.CPUMsCost == 0 {
-		ruc.CPUMsCost = defaultCPUMsCost
+	if !meta.IsDefined("read-cpu-ms-cost") {
+		configutil.AdjustFloat64(&ruc.CPUMsCost, defaultCPUMsCost)
 	}
 }

From 302b96b4ff40e8990c8217d6030cbc6d1aa66748 Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Sun, 7 Apr 2024 14:21:58 +0800
Subject: [PATCH 6/8] fix test

Signed-off-by: Ryan Leung
---
 pkg/mcs/resourcemanager/server/config.go | 3 +--
 .../resourcemanager/resource_manager_command_test.go | 10 +++++-----
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/pkg/mcs/resourcemanager/server/config.go b/pkg/mcs/resourcemanager/server/config.go
index a4f2734cb9f..dfb2babe676 100644
--- a/pkg/mcs/resourcemanager/server/config.go
+++ b/pkg/mcs/resourcemanager/server/config.go
@@ -112,8 +112,7 @@ func (rmc *ControllerConfig) Adjust(meta *configutil.ConfigMetaData) {
 	if rmc == nil {
 		return
 	}
-	rmc.RequestUnit.Adjust(meta)
-
+	rmc.RequestUnit.Adjust(meta.Child("request-unit"))
 	if !meta.IsDefined("degraded-mode-wait-duration") {
 		configutil.AdjustDuration(&rmc.DegradedModeWaitDuration, defaultDegradedModeWaitDuration)
 	}
diff --git a/tools/pd-ctl/tests/resourcemanager/resource_manager_command_test.go b/tools/pd-ctl/tests/resourcemanager/resource_manager_command_test.go
index 5cfc16ffb02..d387a2b87ae 100644
--- a/tools/pd-ctl/tests/resourcemanager/resource_manager_command_test.go
+++ b/tools/pd-ctl/tests/resourcemanager/resource_manager_command_test.go
@@ -58,7 +58,7 @@ func (s *testResourceManagerSuite) TearDownSuite() {
 
 func (s *testResourceManagerSuite) TestConfigController() {
 	re := s.Require()
-	expectCfg := server.ControllerConfig{}
+	expectCfg := server.Config{}
 	expectCfg.Adjust(nil)
 	// Show controller config
 	checkShow := func() {
@@ -69,7 +69,7 @@ func (s *testResourceManagerSuite) TestConfigController() {
 		actualCfg := server.ControllerConfig{}
 		err = json.Unmarshal(output, &actualCfg)
 		re.NoError(err, string(output))
-		re.Equal(expectCfg, actualCfg)
+		re.Equal(expectCfg.Controller, actualCfg)
 	}
 
 	// Check default config
 	checkShow()
@@ -80,20 +80,20 @@ func (s *testResourceManagerSuite) TestConfigController() {
 	output, err := tests.ExecuteCommand(ctl.GetRootCmd(), args...)
 	re.NoError(err)
 	re.Contains(string(output), "Success!")
-	expectCfg.LTBMaxWaitDuration = typeutil.Duration{Duration: 1 * time.Hour}
+	expectCfg.Controller.LTBMaxWaitDuration = typeutil.Duration{Duration: 1 * time.Hour}
 	checkShow()
 
 	args = []string{"-u", s.pdAddr, "resource-manager", "config", "controller", "set", "enable-controller-trace-log", "true"}
 	output, err = tests.ExecuteCommand(ctl.GetRootCmd(), args...)
 	re.NoError(err)
 	re.Contains(string(output), "Success!")
-	expectCfg.EnableControllerTraceLog = true
+	expectCfg.Controller.EnableControllerTraceLog = true
 	checkShow()
 
 	args = []string{"-u", s.pdAddr, "resource-manager", "config", "controller", "set", "write-base-cost", "2"}
 	output, err = tests.ExecuteCommand(ctl.GetRootCmd(), args...)
 	re.NoError(err)
 	re.Contains(string(output), "Success!")
-	expectCfg.RequestUnit.WriteBaseCost = 2
+	expectCfg.Controller.RequestUnit.WriteBaseCost = 2
 	checkShow()
 }

From ff5110f151274c91e254fcacfa8b468ee259a115 Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Wed, 10 Apr 2024 16:37:52 +0800
Subject: [PATCH 7/8] fix conflicts

Signed-off-by: Ryan Leung
---
 client/retry/backoff_test.go | 2 +-
 pkg/ratelimit/runner.go | 2 +-
 pkg/ratelimit/runner_test.go | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/client/retry/backoff_test.go b/client/retry/backoff_test.go
index 5aa651a1b53..8df06b75f94 100644
--- a/client/retry/backoff_test.go
+++ b/client/retry/backoff_test.go
@@ -169,7 +169,7 @@ func (w *testingWriter) Write(p []byte) (n int, err error) {
 	w.messages = append(w.messages, m)
 	return n, nil
 }
-func (w *testingWriter) Sync() error {
+func (*testingWriter) Sync() error {
 	return nil
 }
 
diff --git a/pkg/ratelimit/runner.go b/pkg/ratelimit/runner.go
index 661668af3b9..dd92a10179d 100644
--- a/pkg/ratelimit/runner.go
+++ b/pkg/ratelimit/runner.go
@@ -162,7 +162,7 @@ func NewSyncRunner() *SyncRunner {
 }
 
 // RunTask runs the task synchronously.
-func (s *SyncRunner) RunTask(ctx context.Context, opt TaskOpts, f func(context.Context)) error {
+func (*SyncRunner) RunTask(ctx context.Context, _ TaskOpts, f func(context.Context)) error {
 	f(ctx)
 	return nil
 }
diff --git a/pkg/ratelimit/runner_test.go b/pkg/ratelimit/runner_test.go
index 8a9eff77379..9b8dca231d1 100644
--- a/pkg/ratelimit/runner_test.go
+++ b/pkg/ratelimit/runner_test.go
@@ -36,7 +36,7 @@ func TestAsyncRunner(t *testing.T) {
 		err := runner.RunTask(context.Background(), TaskOpts{
 			TaskName: "test1",
 			Limit:    limiter,
-		}, func(ctx context.Context) {
+		}, func(context.Context) {
 			defer wg.Done()
 			time.Sleep(100 * time.Millisecond)
 		})
@@ -55,7 +55,7 @@ func TestAsyncRunner(t *testing.T) {
 		err := runner.RunTask(context.Background(), TaskOpts{
 			TaskName: "test2",
 			Limit:    limiter,
-		}, func(ctx context.Context) {
+		}, func(context.Context) {
 			defer wg.Done()
 			time.Sleep(100 * time.Millisecond)
 		})

From 14503e39bc1eb14848536a6552cd3c68efa0a88c Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Thu, 11 Apr 2024 11:25:46 +0800
Subject: [PATCH 8/8] fix

Signed-off-by: Ryan Leung
---
 tools/pd-ut/ut.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/pd-ut/ut.go b/tools/pd-ut/ut.go
index 5d50bbd51a8..69a83f007b6 100644
--- a/tools/pd-ut/ut.go
+++ b/tools/pd-ut/ut.go
@@ -562,7 +562,7 @@ func failureCases(input []JUnitTestCase) int {
 	return sum
 }
 
-func (n *numa) testCommand(pkg string, fn string) *exec.Cmd {
+func (*numa) testCommand(pkg string, fn string) *exec.Cmd {
 	args := make([]string, 0, 10)
 	exe := "./" + testFileName(pkg)
 	args = append(args, "-test.cpu", "1")