Commit
Merge branch 'master' into hot-gc
lhy1024 authored Oct 25, 2024
2 parents 53db307 + 1474864 commit b14c35c
Showing 204 changed files with 700 additions and 652 deletions.
15 changes: 15 additions & 0 deletions .golangci.yml
@@ -18,6 +18,16 @@ linters:
- copyloopvar
- goimports
- depguard
- asasalint
- asciicheck
- bidichk
- durationcheck
- gocheckcompilerdirectives
- gochecksumtype
- makezero
- protogetter
- reassign
- intrange
linters-settings:
gocritic:
# Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
@@ -36,16 +46,21 @@ linters-settings:
- G115
testifylint:
enable:
- blank-import
- bool-compare
- compares
- empty
- error-is-as
- error-nil
- expected-actual
- formatter
- len
- negative-positive
- require-error
- suite-dont-use-pkg
- suite-extra-assert-call
- suite-subtest-run
- useless-assert
disable:
- float-compare
- go-require
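Most of the newly enabled linters above are small correctness checks (asasalint, bidichk, durationcheck, makezero, and so on), and the extra testifylint rules tighten assertion style in tests. The one with the most visible effect in this commit is intrange, which prefers Go 1.22's range-over-int form for plain counting loops; that lines up with nearly every Go hunk below rewriting "for i := 0; i < n; i++" into "for range n", or "for i := range n" when the index is still needed. A minimal illustration of the rewrite, with made-up names rather than code from this repository:

package main

import "fmt"

func main() {
	const attempts = 3

	// Old style, flagged by intrange: a three-clause counting loop.
	for i := 0; i < attempts; i++ {
		fmt.Println("attempt", i)
	}

	// Go 1.22 style: range over an integer, keeping the index ...
	for i := range attempts {
		fmt.Println("attempt", i)
	}

	// ... or dropping it entirely when the body does not use it.
	for range attempts {
		fmt.Println("tick")
	}
}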
2 changes: 1 addition & 1 deletion client/client.go
@@ -739,7 +739,7 @@ func (c *client) dispatchTSORequestWithRetry(ctx context.Context, dcLocation str
err error
req *tsoRequest
)
for i := 0; i < dispatchRetryCount; i++ {
for i := range dispatchRetryCount {
// Do not delay for the first time.
if i > 0 {
time.Sleep(dispatchRetryDelay)
6 changes: 3 additions & 3 deletions client/pd_service_discovery.go
@@ -354,7 +354,7 @@ func (c *pdServiceBalancer) set(clients []ServiceClient) {
func (c *pdServiceBalancer) check() {
c.mu.Lock()
defer c.mu.Unlock()
for i := 0; i < c.totalNode; i++ {
for range c.totalNode {
c.now.markAsAvailable()
c.next()
}
@@ -523,7 +523,7 @@ func (c *pdServiceDiscovery) initRetry(f func() error) error {
var err error
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for i := 0; i < c.option.maxRetryTimes; i++ {
for range c.option.maxRetryTimes {
if err = f(); err == nil {
return nil
}
@@ -1093,7 +1093,7 @@ func (c *pdServiceDiscovery) updateServiceClient(members []*pdpb.Member, leader
})
c.all.Store(clients)
// create candidate services for all kinds of request.
for i := 0; i < int(apiKindCount); i++ {
for i := range apiKindCount {
c.apiCandidateNodes[i].set(clients)
}
return err
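The initRetry hunk above is cut off right after the success check. The usual shape of this kind of bounded retry loop is to wait on the ticker between attempts and give up early if the surrounding context is cancelled; a simplified, stand-alone sketch of that shape (the context handling here is an assumption for illustration, not the repository's exact code):

package sketch

import (
	"context"
	"time"
)

// initRetry calls f up to maxRetryTimes, waiting one tick between attempts.
func initRetry(ctx context.Context, maxRetryTimes int, f func() error) error {
	var err error
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for range maxRetryTimes {
		if err = f(); err == nil {
			return nil
		}
		select {
		case <-ctx.Done(): // caller gave up; stop retrying
			return ctx.Err()
		case <-ticker.C: // wait for the next tick, then retry
		}
	}
	return err
}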
6 changes: 3 additions & 3 deletions client/pd_service_discovery_test.go
@@ -137,7 +137,7 @@ func (suite *serviceClientTestSuite) SetupSuite() {
suite.followerServer = newTestServer(false)
go suite.leaderServer.run()
go suite.followerServer.run()
for i := 0; i < 10; i++ {
for range 10 {
leaderConn, err1 := grpc.Dial(suite.leaderServer.addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
followerConn, err2 := grpc.Dial(suite.followerServer.addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err1 == nil && err2 == nil {
@@ -278,7 +278,7 @@ func (suite *serviceClientTestSuite) TestServiceClientBalancer() {
b.set([]ServiceClient{leader, follower})
re.Equal(2, b.totalNode)

for i := 0; i < 10; i++ {
for range 10 {
client := b.get()
ctx := client.BuildGRPCTargetContext(suite.ctx, false)
conn := client.GetClientConn()
@@ -292,7 +292,7 @@ func (suite *serviceClientTestSuite) TestServiceClientBalancer() {
suite.followerServer.server.resetCount()
suite.leaderServer.server.resetCount()

for i := 0; i < 10; i++ {
for range 10 {
client := b.get()
ctx := client.BuildGRPCTargetContext(suite.ctx, true)
conn := client.GetClientConn()
2 changes: 1 addition & 1 deletion client/resource_group/controller/controller.go
@@ -1347,7 +1347,7 @@ func (gc *groupCostController) acquireTokens(ctx context.Context, delta *rmpb.Co
d time.Duration
)
retryLoop:
for i := 0; i < gc.mainCfg.WaitRetryTimes; i++ {
for range gc.mainCfg.WaitRetryTimes {
now := time.Now()
switch gc.mode {
case rmpb.GroupMode_RawMode:
2 changes: 1 addition & 1 deletion client/resource_group/controller/limiter_test.go
@@ -246,7 +246,7 @@ func testQPSCase(concurrency int, reserveN int64, limit int64) (qps float64, ru
var totalRequests int64
start := time.Now()

for i := 0; i < concurrency; i++ {
for range concurrency {
wg.Add(1)
go func() {
defer wg.Done()
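The testQPSCase helper is truncated right after its workers are spawned. A common way to measure throughput under concurrency, which is roughly the shape visible here, is to let each worker bump an atomic counter until a deadline and then divide by the elapsed time; a generic sketch (illustrative only; the real helper drives the limiter under test inside the loop and returns more than just QPS):

package sketch

import (
	"sync"
	"sync/atomic"
	"time"
)

// measureQPS runs `do` from `concurrency` goroutines for `duration`
// and reports how many calls completed per second.
func measureQPS(concurrency int, duration time.Duration, do func()) float64 {
	var total int64
	var wg sync.WaitGroup
	start := time.Now()
	for range concurrency {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for time.Since(start) < duration {
				do() // e.g. reserve tokens from the rate limiter under test
				atomic.AddInt64(&total, 1)
			}
		}()
	}
	wg.Wait()
	return float64(total) / time.Since(start).Seconds()
}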
4 changes: 2 additions & 2 deletions client/resource_manager_client.go
@@ -382,7 +382,7 @@ func (c *client) tryResourceManagerConnect(ctx context.Context, connection *reso
)
ticker := time.NewTicker(retryInterval)
defer ticker.Stop()
for i := 0; i < maxRetryTimes; i++ {
for range maxRetryTimes {
cc, err := c.resourceManagerClient()
if err != nil {
continue
@@ -406,7 +406,7 @@ func (c *client) tryResourceManagerConnect(ctx context.Context, connection *reso
}

func (tbc *tokenBatchController) revokePendingTokenRequest(err error) {
for i := 0; i < len(tbc.tokenRequestCh); i++ {
for range len(tbc.tokenRequestCh) {
req := <-tbc.tokenRequestCh
req.done <- err
}
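revokePendingTokenRequest drains whatever is currently queued in the token request channel and fails each pending request with the given error. Because len(tbc.tokenRequestCh) is evaluated once, the loop receives exactly the requests that were buffered when revocation started; requests enqueued afterwards are untouched. The same idiom reappears in revokePendingRequests in client/tso_dispatcher.go below. A stand-alone sketch of the pattern with hypothetical types:

package sketch

type pendingReq struct {
	// done is assumed to be buffered so the send below cannot block.
	done chan error
}

// revokePending fails every request already buffered in ch with err.
func revokePending(ch chan *pendingReq, err error) {
	for range len(ch) { // len(ch) is read once, before any receive
		req := <-ch
		req.done <- err
	}
}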
2 changes: 1 addition & 1 deletion client/retry/backoff_test.go
@@ -70,7 +70,7 @@ func TestBackoffer(t *testing.T) {
bo = InitialBackoffer(base, max, total)
re.Equal(bo.nextInterval(), base)
re.Equal(bo.nextInterval(), 2*base)
for i := 0; i < 10; i++ {
for range 10 {
re.LessOrEqual(bo.nextInterval(), max)
}
re.Equal(bo.nextInterval(), max)
2 changes: 1 addition & 1 deletion client/testutil/tempurl.go
@@ -31,7 +31,7 @@ var (

// Alloc allocates a local URL for testing.
func Alloc() string {
for i := 0; i < 10; i++ {
for range 10 {
if u := tryAllocTestURL(); u != "" {
return u
}
2 changes: 1 addition & 1 deletion client/timerpool/pool_test.go
@@ -14,7 +14,7 @@ import (
func TestTimerPool(t *testing.T) {
var tp TimerPool

for i := 0; i < 100; i++ {
for range 100 {
timer := tp.Get(20 * time.Millisecond)

select {
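The TimerPool test simply checks that a pooled timer still fires on time. For background, a common way to implement such a pool is a sync.Pool that resets a reused timer on Get and stops (and drains) it on Put; a generic sketch along those lines, which may differ from pd's actual TimerPool:

package sketch

import (
	"sync"
	"time"
)

type TimerPool struct {
	pool sync.Pool
}

// Get returns a timer that fires after d, reusing a pooled timer when possible.
func (p *TimerPool) Get(d time.Duration) *time.Timer {
	if v := p.pool.Get(); v != nil {
		t := v.(*time.Timer)
		t.Reset(d)
		return t
	}
	return time.NewTimer(d)
}

// Put stops the timer, drains a pending tick if it already fired,
// and returns it to the pool for reuse.
func (p *TimerPool) Put(t *time.Timer) {
	if !t.Stop() {
		select {
		case <-t.C:
		default:
		}
	}
	p.pool.Put(t)
}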
2 changes: 1 addition & 1 deletion client/tso_batch_controller.go
@@ -211,7 +211,7 @@ func (tbc *tsoBatchController) adjustBestBatchSize() {
}

func (tbc *tsoBatchController) finishCollectedRequests(physical, firstLogical int64, suffixBits uint32, streamID string, err error) {
for i := 0; i < tbc.collectedRequestCount; i++ {
for i := range tbc.collectedRequestCount {
tsoReq := tbc.collectedRequests[i]
// Retrieve the request context before the request is done to trace without race.
requestCtx := tsoReq.requestCtx
4 changes: 2 additions & 2 deletions client/tso_client.go
@@ -329,7 +329,7 @@ func (c *tsoClient) backupClientConn() (*grpc.ClientConn, string) {
cc *grpc.ClientConn
err error
)
for i := 0; i < len(urls); i++ {
for range urls {
url := urls[rand.Intn(len(urls))]
if cc, err = c.svcDiscovery.GetOrCreateGRPCConn(url); err != nil {
continue
@@ -403,7 +403,7 @@ func (c *tsoClient) tryConnectToTSO(
ticker := time.NewTicker(retryInterval)
defer ticker.Stop()
// Retry several times before falling back to the follower when the network problem happens
for i := 0; i < maxRetryTimes; i++ {
for range maxRetryTimes {
c.svcDiscovery.ScheduleCheckMemberChanged()
cc, url = c.GetTSOAllocatorClientConnByDCLocation(dc)
if _, ok := connectionCtxs.Load(url); ok {
2 changes: 1 addition & 1 deletion client/tso_dispatcher.go
@@ -168,7 +168,7 @@ func (td *tsoDispatcher) scheduleUpdateConnectionCtxs() {
}

func (td *tsoDispatcher) revokePendingRequests(err error) {
for i := 0; i < len(td.tsoRequestCh); i++ {
for range len(td.tsoRequestCh) {
req := <-td.tsoRequestCh
req.tryDone(err)
}
4 changes: 2 additions & 2 deletions client/tso_dispatcher_test.go
@@ -231,7 +231,7 @@ func (s *testTSODispatcherSuite) testStaticConcurrencyImpl(concurrency int) {
// and will be batched together once there is a free token.
reqs := make([]*tsoRequest, 0, tokenCount+3)

for i := 0; i < tokenCount+3; i++ {
for range tokenCount + 3 {
req := s.sendReq(ctx)
s.reqMustNotReady(req)
reqs = append(reqs, req)
@@ -242,7 +242,7 @@ func (s *testTSODispatcherSuite) testStaticConcurrencyImpl(concurrency int) {
// second batch but not finished yet.
// Also note that in current implementation, the tsoStream tries to receive the next result before checking
// the `tsoStream.pendingRequests` queue. Changing this behavior may need to update this test.
for i := 0; i < tokenCount+3; i++ {
for i := range tokenCount + 3 {
expectedPending := tokenCount + 1 - i
if expectedPending > tokenCount {
expectedPending = tokenCount
2 changes: 1 addition & 1 deletion client/tso_service_discovery.go
@@ -211,7 +211,7 @@ func (c *tsoServiceDiscovery) retry(
var err error
ticker := time.NewTicker(retryInterval)
defer ticker.Stop()
for i := 0; i < maxRetryTimes; i++ {
for range maxRetryTimes {
if err = f(); err == nil {
return nil
}
10 changes: 5 additions & 5 deletions client/tso_stream_test.go
@@ -366,7 +366,7 @@ func (s *testTSOStreamSuite) TestTSOStreamBasic() {
func (s *testTSOStreamSuite) testTSOStreamBrokenImpl(err error, pendingRequests int) {
var resultCh []<-chan callbackInvocation

for i := 0; i < pendingRequests; i++ {
for range pendingRequests {
ch := s.mustProcessRequestWithResultCh(1)
resultCh = append(resultCh, ch)
s.noResult(ch)
@@ -414,7 +414,7 @@ func (s *testTSOStreamSuite) TestTSOStreamCanceledWithPendingReq() {
func (s *testTSOStreamSuite) TestTSOStreamFIFO() {
var resultChs []<-chan callbackInvocation
const count = 5
for i := 0; i < count; i++ {
for i := range count {
ch := s.mustProcessRequestWithResultCh(int64(i + 1))
resultChs = append(resultChs, ch)
}
@@ -423,7 +423,7 @@ func (s *testTSOStreamSuite) TestTSOStreamFIFO() {
s.noResult(ch)
}

for i := 0; i < count; i++ {
for i := range count {
s.inner.returnResult(int64((i+1)*10), int64(i), uint32(i+1))
}

@@ -505,7 +505,7 @@ func (s *testTSOStreamSuite) TestEstimatedLatency() {
reqStartTimeCh := make(chan time.Time, maxPendingRequestsInTSOStream)
// Limit concurrent requests to be less than the capacity of tsoStream.pendingRequests.
tokenCh := make(chan struct{}, maxPendingRequestsInTSOStream-1)
for i := 0; i < 40; i++ {
for range 40 {
tokenCh <- struct{}{}
}
// Return a result after a 50ms delay for each request
@@ -594,7 +594,7 @@ func TestRCFilter(t *testing.T) {
re.Equal(0.0, f.update(now, 0))
lastOutput := 0.0
// 10000 even samples in 1 second.
for i := 0; i < 10000; i++ {
for range 10000 {
now = now.Add(time.Microsecond * 100)
output := f.update(now, 1.0)
re.Greater(output, lastOutput)
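TestRCFilter feeds the filter a constant input of 1.0 at 100µs intervals and expects every output to be strictly greater than the previous one, which is the behaviour of a first-order (RC) low-pass filter converging toward a constant input. One standard discrete form of such a filter, given here only as background for the test and not taken from pd's implementation:

package sketch

import "time"

// rcFilter is a first-order low-pass filter with time constant rc.
type rcFilter struct {
	rc   time.Duration
	last time.Time
	out  float64
}

// update blends the sample x into the running output; for a constant input
// the output rises monotonically toward that input.
func (f *rcFilter) update(now time.Time, x float64) float64 {
	dt := now.Sub(f.last).Seconds()
	f.last = now
	alpha := dt / (f.rc.Seconds() + dt)
	f.out += alpha * (x - f.out)
	return f.out
}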
14 changes: 14 additions & 0 deletions codecov.yml
@@ -24,3 +24,17 @@ flag_management:
target: 74% # increase this to enforce higher coverage; 74% is chosen so the check does not constantly report errors and the warning keeps its meaning.
- type: patch
target: 74% # increase this to enforce higher coverage; 74% is chosen so the check does not constantly report errors and the warning keeps its meaning.

ignore:
- "tools/pd-analysis"
- "tools/pd-api-bench"
- "tools/pd-backup"
- "tools/pd-heartbeat-bench"
- "tools/pd-recover"
- "tools/pd-simulator"
- "tools/pd-tso-bench"
- "tools/pd-ut"
- "tools/regions-dump"
- "tools/stores-dump"
- "tests"

6 changes: 3 additions & 3 deletions pkg/autoscaling/prometheus_test.go
@@ -49,7 +49,7 @@ var podNameTemplate = map[ComponentType]string{
func generatePodNames(component ComponentType) []string {
names := make([]string, 0, instanceCount)
pattern := podNameTemplate[component]
for i := 0; i < instanceCount; i++ {
for i := range instanceCount {
names = append(names, fmt.Sprintf(pattern, mockClusterName, i))
}
return names
@@ -119,7 +119,7 @@ func (c *normalClient) buildCPUMockData(component ComponentType) {
cpuQuotaQuery := cpuQuotaPromQLTemplate[component]

var results []result
for i := 0; i < instanceCount; i++ {
for i := range instanceCount {
results = append(results, result{
Value: []any{time.Now().Unix(), fmt.Sprintf("%f", mockResultValue)},
Metric: metric{
@@ -192,7 +192,7 @@ func TestRetrieveCPUMetrics(t *testing.T) {
options := NewQueryOptions(component, metric, addresses[:len(addresses)-1], time.Now(), mockDuration)
result, err := querier.Query(options)
re.NoError(err)
for i := 0; i < len(addresses)-1; i++ {
for i := range len(addresses) - 1 {
value, ok := result[addresses[i]]
re.True(ok)
re.Less(math.Abs(value-mockResultValue), 1e-6)
6 changes: 3 additions & 3 deletions pkg/balancer/balancer_test.go
@@ -30,7 +30,7 @@ func TestBalancerPutAndDelete(t *testing.T) {
re.Equal(uint32(0), balancer.Next())
// test put
exists := make(map[uint32]struct{})
for i := 0; i < 100; i++ {
for range 100 {
num := rand.Uint32()
balancer.Put(num)
exists[num] = struct{}{}
@@ -77,12 +77,12 @@ func TestBalancerDuplicate(t *testing.T) {
func TestRoundRobin(t *testing.T) {
re := require.New(t)
balancer := NewRoundRobin[uint32]()
for i := 0; i < 100; i++ {
for range 100 {
num := rand.Uint32()
balancer.Put(num)
}
statistics := make(map[uint32]int)
for i := 0; i < 1000; i++ {
for range 1000 {
statistics[balancer.Next()]++
}
min := 1000
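TestRoundRobin registers 100 random values and then checks how 1000 Next() calls are distributed across them. As background, a minimal generic round-robin balancer is just a slice plus an advancing cursor; a sketch of that idea, with locking and duplicate handling omitted and without claiming to match pd's implementation:

package sketch

// RoundRobin cycles through the items that have been Put into it.
type RoundRobin[T comparable] struct {
	items []T
	next  int
}

// Put registers an item to be served by Next.
func (r *RoundRobin[T]) Put(item T) {
	r.items = append(r.items, item)
}

// Next returns the next item in cyclic order, or the zero value when empty.
func (r *RoundRobin[T]) Next() T {
	var zero T
	if len(r.items) == 0 {
		return zero
	}
	item := r.items[r.next%len(r.items)]
	r.next = (r.next + 1) % len(r.items)
	return item
}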
(The remaining changed files are not shown.)
