diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index 9d84675b7be..c0bdb7ea492 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -10,7 +10,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: 1.20.1 + go-version: '1.21' - name: Checkout code uses: actions/checkout@v3 - name: Restore cache diff --git a/.github/workflows/pd-docker-image.yaml b/.github/workflows/pd-docker-image.yaml index c1e35a28160..2a04c030016 100644 --- a/.github/workflows/pd-docker-image.yaml +++ b/.github/workflows/pd-docker-image.yaml @@ -17,7 +17,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: 1.20.1 + go-version: '1.21' - name: Checkout code uses: actions/checkout@v3 - name: Make diff --git a/.github/workflows/pd-tests.yaml b/.github/workflows/pd-tests.yaml index 1a9e353b21a..73e31fd4ad1 100644 --- a/.github/workflows/pd-tests.yaml +++ b/.github/workflows/pd-tests.yaml @@ -29,7 +29,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.20.1" + go-version: '1.21' - name: Checkout code uses: actions/checkout@v3 - name: Restore cache diff --git a/.github/workflows/tso-consistency-test.yaml b/.github/workflows/tso-consistency-test.yaml index 06fe453821e..570cbbc5da8 100644 --- a/.github/workflows/tso-consistency-test.yaml +++ b/.github/workflows/tso-consistency-test.yaml @@ -10,7 +10,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.20.1" + go-version: '1.21' - name: Checkout code uses: actions/checkout@v3 - name: Make TSO Consistency Test diff --git a/.github/workflows/tso-function-test.yaml b/.github/workflows/tso-function-test.yaml index 7c988e465ee..ee7679602f5 100644 --- a/.github/workflows/tso-function-test.yaml +++ b/.github/workflows/tso-function-test.yaml @@ -21,7 +21,7 @@ jobs: steps: - uses: actions/setup-go@v3 with: - go-version: "1.20.1" + go-version: '1.21' - name: Checkout code uses: actions/checkout@v3 - name: Make TSO Function Test diff --git a/Dockerfile b/Dockerfile index 0899007031d..550b1c1bb72 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20-alpine as builder +FROM golang:1.21-alpine as builder RUN apk add --no-cache \ make \ diff --git a/client/client.go b/client/client.go index b1b6e862e74..d3d3805fc4d 100644 --- a/client/client.go +++ b/client/client.go @@ -1457,6 +1457,9 @@ func trimHTTPPrefix(str string) string { } func (c *client) LoadGlobalConfig(ctx context.Context, names []string, configPath string) ([]GlobalConfigItem, int64, error) { + ctx, cancel := context.WithTimeout(ctx, c.option.timeout) + defer cancel() + ctx = grpcutil.BuildForwardContext(ctx, c.GetLeaderAddr()) protoClient := c.getClient() if protoClient == nil { return nil, 0, errs.ErrClientGetProtoClient @@ -1486,6 +1489,9 @@ func (c *client) StoreGlobalConfig(ctx context.Context, configPath string, items for i, it := range items { resArr[i] = &pdpb.GlobalConfigItem{Name: it.Name, Value: it.Value, Kind: it.EventType, Payload: it.PayLoad} } + ctx, cancel := context.WithTimeout(ctx, c.option.timeout) + defer cancel() + ctx = grpcutil.BuildForwardContext(ctx, c.GetLeaderAddr()) protoClient := c.getClient() if protoClient == nil { return errs.ErrClientGetProtoClient @@ -1501,6 +1507,9 @@ func (c *client) WatchGlobalConfig(ctx context.Context, configPath string, revis // TODO: Add retry mechanism // register watch components there globalConfigWatcherCh := make(chan []GlobalConfigItem, 16) + ctx, cancel := context.WithTimeout(ctx, c.option.timeout) + defer cancel() + ctx = 
grpcutil.BuildForwardContext(ctx, c.GetLeaderAddr()) protoClient := c.getClient() if protoClient == nil { return nil, errs.ErrClientGetProtoClient @@ -1547,6 +1556,9 @@ func (c *client) WatchGlobalConfig(ctx context.Context, configPath string, revis } func (c *client) GetExternalTimestamp(ctx context.Context) (uint64, error) { + ctx, cancel := context.WithTimeout(ctx, c.option.timeout) + defer cancel() + ctx = grpcutil.BuildForwardContext(ctx, c.GetLeaderAddr()) protoClient := c.getClient() if protoClient == nil { return 0, errs.ErrClientGetProtoClient @@ -1565,6 +1577,9 @@ func (c *client) GetExternalTimestamp(ctx context.Context) (uint64, error) { } func (c *client) SetExternalTimestamp(ctx context.Context, timestamp uint64) error { + ctx, cancel := context.WithTimeout(ctx, c.option.timeout) + defer cancel() + ctx = grpcutil.BuildForwardContext(ctx, c.GetLeaderAddr()) protoClient := c.getClient() if protoClient == nil { return errs.ErrClientGetProtoClient diff --git a/client/gc_client.go b/client/gc_client.go index c573836d2ba..b5d64e25129 100644 --- a/client/gc_client.go +++ b/client/gc_client.go @@ -102,6 +102,9 @@ func (c *client) WatchGCSafePointV2(ctx context.Context, revision int64) (chan [ Revision: revision, } + ctx, cancel := context.WithTimeout(ctx, c.option.timeout) + defer cancel() + ctx = grpcutil.BuildForwardContext(ctx, c.GetLeaderAddr()) protoClient := c.getClient() if protoClient == nil { return nil, errs.ErrClientGetProtoClient diff --git a/client/go.mod b/client/go.mod index cf1349ca20a..9eb066d0fcc 100644 --- a/client/go.mod +++ b/client/go.mod @@ -1,6 +1,6 @@ module github.com/tikv/pd/client -go 1.20 +go 1.21 require ( github.com/cloudfoundry/gosigar v1.3.6 diff --git a/client/go.sum b/client/go.sum index 2884e84a1fa..33ba3254d53 100644 --- a/client/go.sum +++ b/client/go.sum @@ -21,6 +21,7 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -50,6 +51,7 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -76,8 +78,11 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -212,6 +217,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -237,10 +243,12 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/go.mod b/go.mod index da75ab13372..27182c0e1d3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/tikv/pd -go 1.20 +go 1.21 require ( github.com/AlekSi/gocov-xml v1.0.0 diff --git a/go.sum b/go.sum index 74ef74bb07e..e2da3969165 100644 --- a/go.sum +++ b/go.sum @@ -69,6 +69,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= 
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch h1:KLE/YeX+9FNaGVW5MtImRVPhjDpfpgJhvkuYWBmOYbo= github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch/go.mod h1:KjBLriHXe7L6fGceqWzTod8HUB/TP1WWDtfuSYtYXaI= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= @@ -204,11 +205,14 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -280,14 +284,23 @@ github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1: github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= +github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= +github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= +github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= 
github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= +github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= github.com/jarcoal/httpmock v1.0.8 h1:8kI16SoO6LQKgPE7PvQuV+YuD/inwHd7fOOe2zMbo4k= +github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= @@ -370,6 +383,7 @@ github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfp github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg= github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo= github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= +github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus= github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -414,6 +428,7 @@ github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36/go.mod h1:pxMtw7c github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 h1:rfD9v3+ppLPzoQBgZev0qYCpegrwyFx/BUpkApEiKdY= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.3.0 h1:IF6LC/4+b1KNwrMlr2rBTUrojFPMexXBcDWZSpNwxjg= github.com/pingcap/errcode v0.3.0/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -865,9 +880,11 @@ gorm.io/datatypes v1.1.0/go.mod h1:SH2K9R+2RMjuX1CkCONrPwoe9JzVv2hkQvEu4bXGojE= gorm.io/driver/mysql v1.4.5 h1:u1lytId4+o9dDaNcPCFzNv7h6wvmc92UjNk3z8enSBU= gorm.io/driver/mysql v1.4.5/go.mod h1:SxzItlnT1cb6e1e4ZRpgJN2VYtcqJgqnHxWr4wsP8oc= gorm.io/driver/postgres v1.4.5 h1:mTeXTTtHAgnS9PgmhN2YeUbazYpLhUI1doLnw42XUZc= +gorm.io/driver/postgres v1.4.5/go.mod h1:GKNQYSJ14qvWkvPwXljMGehpKrhlDNsqYRr5HnYGncg= gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU= gorm.io/driver/sqlite v1.4.3/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI= gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0= +gorm.io/driver/sqlserver v1.4.1/go.mod h1:DJ4P+MeZbc5rvY58PnmN1Lnyvb5gw5NPzGshHDnJLig= gorm.io/gorm v1.21.9/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= diff --git a/pkg/core/region.go b/pkg/core/region.go index 450fab499e6..768635adf19 100644 --- a/pkg/core/region.go +++ b/pkg/core/region.go @@ -634,6 +634,11 @@ func (r *RegionInfo) GetReplicationStatus() *replication_modepb.RegionReplicatio return r.replicationStatus } +// IsFlashbackChanged returns true if flashback changes. 
+func (r *RegionInfo) IsFlashbackChanged(l *RegionInfo) bool {
+	return r.meta.FlashbackStartTs != l.meta.FlashbackStartTs || r.meta.IsInFlashback != l.meta.IsInFlashback
+}
+
 // IsFromHeartbeat returns whether the region info is from the region heartbeat.
 func (r *RegionInfo) IsFromHeartbeat() bool {
 	return r.fromHeartbeat
@@ -760,6 +765,14 @@ func GenerateRegionGuideFunc(enableLog bool) RegionGuideFunc {
 			(region.GetReplicationStatus().GetState() != origin.GetReplicationStatus().GetState() ||
 				region.GetReplicationStatus().GetStateId() != origin.GetReplicationStatus().GetStateId()) {
 			saveCache = true
+			return
+		}
+		// Do not save to KV, because 1) the flashback flag will eventually be set back
+		// to false, and 2) a flashback touches almost all regions in a cluster, so saving
+		// to KV may degrade PD performance when there are many regions.
+		if region.IsFlashbackChanged(origin) {
+			saveCache = true
+			return
 		}
 	}
 	return
diff --git a/pkg/core/region_option.go b/pkg/core/region_option.go
index ba46fab9420..ce959905810 100644
--- a/pkg/core/region_option.go
+++ b/pkg/core/region_option.go
@@ -183,6 +183,14 @@ func WithDecConfVer() RegionCreateOption {
 	}
 }
 
+// WithFlashback sets the region flashback states.
+func WithFlashback(isInFlashback bool, flashbackTS uint64) RegionCreateOption {
+	return func(region *RegionInfo) {
+		region.meta.FlashbackStartTs = flashbackTS
+		region.meta.IsInFlashback = isInFlashback
+	}
+}
+
 // SetCPUUsage sets the CPU usage of the region.
 func SetCPUUsage(v uint64) RegionCreateOption {
 	return func(region *RegionInfo) {
diff --git a/pkg/election/lease.go b/pkg/election/lease.go
index 1e3e66ddcce..441f66a4cbd 100644
--- a/pkg/election/lease.go
+++ b/pkg/election/lease.go
@@ -174,8 +174,11 @@ func (l *lease) keepAliveWorker(ctx context.Context, interval time.Duration) <-c
 				expire := start.Add(time.Duration(res.TTL) * time.Second)
 				select {
 				case ch <- expire:
-				case <-ctx1.Done():
+				// Here we don't use `ctx1.Done()` because we want to make sure that the expire time is updated if the keep-alive succeeds.
+				case <-ctx.Done():
 				}
+			} else {
+				log.Error("keep alive response ttl is zero", zap.String("purpose", l.Purpose))
 			}
 		}()
diff --git a/pkg/election/lease_test.go b/pkg/election/lease_test.go
index dd10108277c..70f55230293 100644
--- a/pkg/election/lease_test.go
+++ b/pkg/election/lease_test.go
@@ -101,3 +101,34 @@ func TestLease(t *testing.T) {
 	time.Sleep((defaultLeaseTimeout + 1) * time.Second)
 	re.True(lease1.IsExpired())
 }
+
+func TestLeaseKeepAlive(t *testing.T) {
+	re := require.New(t)
+	cfg := etcdutil.NewTestSingleConfig(t)
+	etcd, err := embed.StartEtcd(cfg)
+	defer func() {
+		etcd.Close()
+	}()
+	re.NoError(err)
+
+	ep := cfg.LCUrls[0].String()
+	client, err := clientv3.New(clientv3.Config{
+		Endpoints: []string{ep},
+	})
+	re.NoError(err)
+
+	<-etcd.Server.ReadyNotify()
+
+	// Create the lease.
+ lease := &lease{ + Purpose: "test_lease", + client: client, + lease: clientv3.NewLease(client), + } + + re.NoError(lease.Grant(defaultLeaseTimeout)) + ch := lease.keepAliveWorker(context.Background(), 2*time.Second) + time.Sleep(2 * time.Second) + <-ch + re.NoError(lease.Close()) +} diff --git a/pkg/keyspace/keyspace.go b/pkg/keyspace/keyspace.go index 1ce599b5f81..1607676a37b 100644 --- a/pkg/keyspace/keyspace.go +++ b/pkg/keyspace/keyspace.go @@ -309,7 +309,7 @@ func (manager *Manager) splitKeyspaceRegion(id uint32, waitRegionSplit bool) (er }) start := time.Now() - keyspaceRule := makeLabelRule(id) + keyspaceRule := MakeLabelRule(id) cl, ok := manager.cluster.(interface{ GetRegionLabeler() *labeler.RegionLabeler }) if !ok { return errors.New("cluster does not support region label") diff --git a/pkg/keyspace/util.go b/pkg/keyspace/util.go index 2923dc7053f..6042a0b23be 100644 --- a/pkg/keyspace/util.go +++ b/pkg/keyspace/util.go @@ -193,8 +193,8 @@ func getRegionLabelID(id uint32) string { return regionLabelIDPrefix + strconv.FormatUint(uint64(id), endpoint.SpaceIDBase) } -// makeLabelRule makes the label rule for the given keyspace id. -func makeLabelRule(id uint32) *labeler.LabelRule { +// MakeLabelRule makes the label rule for the given keyspace id. +func MakeLabelRule(id uint32) *labeler.LabelRule { return &labeler.LabelRule{ ID: getRegionLabelID(id), Index: 0, diff --git a/pkg/keyspace/util_test.go b/pkg/keyspace/util_test.go index c7b3738a811..3f9396d6989 100644 --- a/pkg/keyspace/util_test.go +++ b/pkg/keyspace/util_test.go @@ -121,6 +121,6 @@ func TestMakeLabelRule(t *testing.T) { }, } for _, testCase := range testCases { - re.Equal(testCase.expectedLabelRule, makeLabelRule(testCase.id)) + re.Equal(testCase.expectedLabelRule, MakeLabelRule(testCase.id)) } } diff --git a/pkg/mcs/resourcemanager/server/server.go b/pkg/mcs/resourcemanager/server/server.go index 645e118e1c9..19c10bb8cf9 100644 --- a/pkg/mcs/resourcemanager/server/server.go +++ b/pkg/mcs/resourcemanager/server/server.go @@ -128,9 +128,11 @@ func (s *Server) primaryElectionLoop() { defer s.serverLoopWg.Done() for { - if s.IsClosed() { + select { + case <-s.serverLoopCtx.Done(): log.Info("server is closed, exit resource manager primary election loop") return + default: } primary, checkAgain := s.participant.CheckLeader() diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index 88b79ad9669..f58fba2ed0b 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -6,33 +6,121 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mcs/scheduling/server/config" + sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/storage/endpoint" + "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/buckets" + "github.com/tikv/pd/pkg/statistics/utils" + "github.com/tikv/pd/pkg/storage" ) // Cluster is used to manage all information for scheduling purpose. type Cluster struct { - basicCluster *core.BasicCluster + *core.BasicCluster ruleManager *placement.RuleManager labelerManager *labeler.RegionLabeler persistConfig *config.PersistConfig + hotStat *statistics.HotStat + storage storage.Storage } const regionLabelGCInterval = time.Hour // NewCluster creates a new cluster. 
-func NewCluster(ctx context.Context, storage endpoint.RuleStorage, cfg *config.Config) (*Cluster, error) { +func NewCluster(ctx context.Context, storage storage.Storage, cfg *config.Config) (*Cluster, error) { basicCluster := core.NewBasicCluster() persistConfig := config.NewPersistConfig(cfg) labelerManager, err := labeler.NewRegionLabeler(ctx, storage, regionLabelGCInterval) if err != nil { return nil, err } + return &Cluster{ - basicCluster: basicCluster, + BasicCluster: basicCluster, ruleManager: placement.NewRuleManager(storage, basicCluster, persistConfig), labelerManager: labelerManager, persistConfig: persistConfig, + hotStat: statistics.NewHotStat(ctx), + storage: storage, }, nil } + +// GetBasicCluster returns the basic cluster. +func (c *Cluster) GetBasicCluster() *core.BasicCluster { + return c.BasicCluster +} + +// GetSharedConfig returns the shared config. +func (c *Cluster) GetSharedConfig() sc.SharedConfigProvider { + return c.persistConfig +} + +// GetRuleManager returns the rule manager. +func (c *Cluster) GetRuleManager() *placement.RuleManager { + return c.ruleManager +} + +// GetRegionLabeler returns the region labeler. +func (c *Cluster) GetRegionLabeler() *labeler.RegionLabeler { + return c.labelerManager +} + +// GetStoresLoads returns load stats of all stores. +func (c *Cluster) GetStoresLoads() map[uint64][]float64 { + return c.hotStat.GetStoresLoads() +} + +// IsRegionHot checks if a region is in hot state. +func (c *Cluster) IsRegionHot(region *core.RegionInfo) bool { + return c.hotStat.IsRegionHot(region, c.persistConfig.GetHotRegionCacheHitsThreshold()) +} + +// GetHotPeerStat returns hot peer stat with specified regionID and storeID. +func (c *Cluster) GetHotPeerStat(rw utils.RWType, regionID, storeID uint64) *statistics.HotPeerStat { + return c.hotStat.GetHotPeerStat(rw, regionID, storeID) +} + +// RegionReadStats returns hot region's read stats. +// The result only includes peers that are hot enough. +// RegionStats is a thread-safe method +func (c *Cluster) RegionReadStats() map[uint64][]*statistics.HotPeerStat { + // As read stats are reported by store heartbeat, the threshold needs to be adjusted. + threshold := c.persistConfig.GetHotRegionCacheHitsThreshold() * + (utils.RegionHeartBeatReportInterval / utils.StoreHeartBeatReportInterval) + return c.hotStat.RegionStats(utils.Read, threshold) +} + +// RegionWriteStats returns hot region's write stats. +// The result only includes peers that are hot enough. +func (c *Cluster) RegionWriteStats() map[uint64][]*statistics.HotPeerStat { + // RegionStats is a thread-safe method + return c.hotStat.RegionStats(utils.Write, c.persistConfig.GetHotRegionCacheHitsThreshold()) +} + +// BucketsStats returns hot region's buckets stats. +func (c *Cluster) BucketsStats(degree int, regionIDs ...uint64) map[uint64][]*buckets.BucketStat { + return c.hotStat.BucketsStats(degree, regionIDs...) +} + +// GetStorage returns the storage. +func (c *Cluster) GetStorage() storage.Storage { + return c.storage +} + +// GetCheckerConfig returns the checker config. +func (c *Cluster) GetCheckerConfig() sc.CheckerConfigProvider { return c.persistConfig } + +// GetSchedulerConfig returns the scheduler config. +func (c *Cluster) GetSchedulerConfig() sc.SchedulerConfigProvider { return c.persistConfig } + +// GetStoreConfig returns the store config. 
+func (c *Cluster) GetStoreConfig() sc.StoreConfigProvider { return c.persistConfig } + +// TODO: implement the following methods + +// UpdateRegionsLabelLevelStats updates the region label level stats. +func (c *Cluster) UpdateRegionsLabelLevelStats(regions []*core.RegionInfo) {} + +// AllocID allocates a new ID. +func (c *Cluster) AllocID() (uint64, error) { return 0, nil } diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go index 48ec0917e25..7839ec7f274 100644 --- a/pkg/mcs/scheduling/server/config/config.go +++ b/pkg/mcs/scheduling/server/config/config.go @@ -33,9 +33,11 @@ import ( "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/mcs/utils" sc "github.com/tikv/pd/pkg/schedule/config" + "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/configutil" "github.com/tikv/pd/pkg/utils/grpcutil" "github.com/tikv/pd/pkg/utils/metricutil" + "github.com/tikv/pd/pkg/utils/typeutil" "go.uber.org/zap" ) @@ -193,6 +195,7 @@ type PersistConfig struct { clusterVersion unsafe.Pointer schedule atomic.Value replication atomic.Value + storeConfig atomic.Value } // NewPersistConfig creates a new PersistConfig instance. @@ -201,6 +204,9 @@ func NewPersistConfig(cfg *Config) *PersistConfig { o.SetClusterVersion(&cfg.ClusterVersion) o.schedule.Store(&cfg.Schedule) o.replication.Store(&cfg.Replication) + // storeConfig will be fetched from TiKV by PD API server, + // so we just set an empty value here first. + o.storeConfig.Store(&sc.StoreConfig{}) return o } @@ -234,6 +240,19 @@ func (o *PersistConfig) SetReplicationConfig(cfg *sc.ReplicationConfig) { o.replication.Store(cfg) } +// SetStoreConfig sets the TiKV store configuration. +func (o *PersistConfig) SetStoreConfig(cfg *sc.StoreConfig) { + // Some of the fields won't be persisted and watched, + // so we need to adjust it here before storing it. + cfg.Adjust() + o.storeConfig.Store(cfg) +} + +// GetStoreConfig returns the TiKV store configuration. +func (o *PersistConfig) GetStoreConfig() *sc.StoreConfig { + return o.storeConfig.Load().(*sc.StoreConfig) +} + // GetMaxReplicas returns the max replicas. func (o *PersistConfig) GetMaxReplicas() int { return int(o.GetReplicationConfig().MaxReplicas) @@ -264,24 +283,54 @@ func (o *PersistConfig) GetHighSpaceRatio() float64 { return o.GetScheduleConfig().HighSpaceRatio } +// GetHotRegionScheduleLimit returns the limit for hot region schedule. +func (o *PersistConfig) GetHotRegionScheduleLimit() uint64 { + return o.GetScheduleConfig().HotRegionScheduleLimit +} + +// GetRegionScheduleLimit returns the limit for region schedule. +func (o *PersistConfig) GetRegionScheduleLimit() uint64 { + return o.GetScheduleConfig().RegionScheduleLimit +} + +// GetLeaderScheduleLimit returns the limit for leader schedule. +func (o *PersistConfig) GetLeaderScheduleLimit() uint64 { + return o.GetScheduleConfig().LeaderScheduleLimit +} + +// GetReplicaScheduleLimit returns the limit for replica schedule. +func (o *PersistConfig) GetReplicaScheduleLimit() uint64 { + return o.GetScheduleConfig().ReplicaScheduleLimit +} + +// GetMergeScheduleLimit returns the limit for merge schedule. +func (o *PersistConfig) GetMergeScheduleLimit() uint64 { + return o.GetScheduleConfig().MergeScheduleLimit +} + +// GetLeaderSchedulePolicy is to get leader schedule policy. 
+func (o *PersistConfig) GetLeaderSchedulePolicy() constant.SchedulePolicy { + return constant.StringToSchedulePolicy(o.GetScheduleConfig().LeaderSchedulePolicy) +} + // GetMaxStoreDownTime returns the max store downtime. func (o *PersistConfig) GetMaxStoreDownTime() time.Duration { return o.GetScheduleConfig().MaxStoreDownTime.Duration } +// GetIsolationLevel returns the isolation label for each region. +func (o *PersistConfig) GetIsolationLevel() string { + return o.GetReplicationConfig().IsolationLevel +} + // GetLocationLabels returns the location labels. func (o *PersistConfig) GetLocationLabels() []string { return o.GetReplicationConfig().LocationLabels } -// CheckLabelProperty checks if the label property is satisfied. -func (o *PersistConfig) CheckLabelProperty(typ string, labels []*metapb.StoreLabel) bool { - return false -} - // IsUseJointConsensus returns if the joint consensus is enabled. func (o *PersistConfig) IsUseJointConsensus() bool { - return true + return o.GetScheduleConfig().EnableJointConsensus } // GetKeyType returns the key type. @@ -299,9 +348,14 @@ func (o *PersistConfig) IsOneWayMergeEnabled() bool { return o.GetScheduleConfig().EnableOneWayMerge } -// GetMergeScheduleLimit returns the merge schedule limit. -func (o *PersistConfig) GetMergeScheduleLimit() uint64 { - return o.GetScheduleConfig().MergeScheduleLimit +// GetMaxMergeRegionSize returns the max region size. +func (o *PersistConfig) GetMaxMergeRegionSize() uint64 { + return o.GetScheduleConfig().MaxMergeRegionSize +} + +// GetMaxMergeRegionKeys returns the max region keys. +func (o *PersistConfig) GetMaxMergeRegionKeys() uint64 { + return o.GetScheduleConfig().MaxMergeRegionKeys } // GetRegionScoreFormulaVersion returns the region score formula version. @@ -314,6 +368,96 @@ func (o *PersistConfig) GetSchedulerMaxWaitingOperator() uint64 { return o.GetScheduleConfig().SchedulerMaxWaitingOperator } +// GetHotRegionCacheHitsThreshold returns the hot region cache hits threshold. +func (o *PersistConfig) GetHotRegionCacheHitsThreshold() int { + return int(o.GetScheduleConfig().HotRegionCacheHitsThreshold) +} + +// GetMaxMovableHotPeerSize returns the max movable hot peer size. +func (o *PersistConfig) GetMaxMovableHotPeerSize() int64 { + return o.GetScheduleConfig().MaxMovableHotPeerSize +} + +// GetSwitchWitnessInterval returns the interval between promote to non-witness and starting to switch to witness. +func (o *PersistConfig) GetSwitchWitnessInterval() time.Duration { + return o.GetScheduleConfig().SwitchWitnessInterval.Duration +} + +// GetSplitMergeInterval returns the interval between finishing split and starting to merge. +func (o *PersistConfig) GetSplitMergeInterval() time.Duration { + return o.GetScheduleConfig().SplitMergeInterval.Duration +} + +// GetSlowStoreEvictingAffectedStoreRatioThreshold returns the affected ratio threshold when judging a store is slow. +func (o *PersistConfig) GetSlowStoreEvictingAffectedStoreRatioThreshold() float64 { + return o.GetScheduleConfig().SlowStoreEvictingAffectedStoreRatioThreshold +} + +// GetPatrolRegionInterval returns the interval of patrolling region. +func (o *PersistConfig) GetPatrolRegionInterval() time.Duration { + return o.GetScheduleConfig().PatrolRegionInterval.Duration +} + +// GetTolerantSizeRatio gets the tolerant size ratio. +func (o *PersistConfig) GetTolerantSizeRatio() float64 { + return o.GetScheduleConfig().TolerantSizeRatio +} + +// GetWitnessScheduleLimit returns the limit for region schedule. 
+func (o *PersistConfig) GetWitnessScheduleLimit() uint64 { + return o.GetScheduleConfig().WitnessScheduleLimit +} + +// IsDebugMetricsEnabled returns if debug metrics is enabled. +func (o *PersistConfig) IsDebugMetricsEnabled() bool { + return o.GetScheduleConfig().EnableDebugMetrics +} + +// IsDiagnosticAllowed returns whether is enable to use diagnostic. +func (o *PersistConfig) IsDiagnosticAllowed() bool { + return o.GetScheduleConfig().EnableDiagnostic +} + +// IsRemoveDownReplicaEnabled returns if remove down replica is enabled. +func (o *PersistConfig) IsRemoveDownReplicaEnabled() bool { + return o.GetScheduleConfig().EnableRemoveDownReplica +} + +// IsReplaceOfflineReplicaEnabled returns if replace offline replica is enabled. +func (o *PersistConfig) IsReplaceOfflineReplicaEnabled() bool { + return o.GetScheduleConfig().EnableReplaceOfflineReplica +} + +// IsMakeUpReplicaEnabled returns if make up replica is enabled. +func (o *PersistConfig) IsMakeUpReplicaEnabled() bool { + return o.GetScheduleConfig().EnableMakeUpReplica +} + +// IsRemoveExtraReplicaEnabled returns if remove extra replica is enabled. +func (o *PersistConfig) IsRemoveExtraReplicaEnabled() bool { + return o.GetScheduleConfig().EnableRemoveExtraReplica +} + +// IsLocationReplacementEnabled returns if location replace is enabled. +func (o *PersistConfig) IsLocationReplacementEnabled() bool { + return o.GetScheduleConfig().EnableLocationReplacement +} + +// IsWitnessAllowed returns if the witness is allowed. +func (o *PersistConfig) IsWitnessAllowed() bool { + return o.GetScheduleConfig().EnableWitness +} + +// IsPlacementRulesCacheEnabled returns if the placement rules cache is enabled. +func (o *PersistConfig) IsPlacementRulesCacheEnabled() bool { + return o.GetReplicationConfig().EnablePlacementRulesCache +} + +// IsSchedulingHalted returns if PD scheduling is halted. +func (o *PersistConfig) IsSchedulingHalted() bool { + return o.GetScheduleConfig().HaltScheduling +} + // GetStoreLimitByType returns the limit of a store with a given type. func (o *PersistConfig) GetStoreLimitByType(storeID uint64, typ storelimit.Type) (returned float64) { limit := o.GetStoreLimit(storeID) @@ -346,18 +490,125 @@ func (o *PersistConfig) GetStoreLimit(storeID uint64) (returnSC sc.StoreLimitCon return o.GetScheduleConfig().StoreLimit[storeID] } -// IsWitnessAllowed returns if the witness is allowed. -func (o *PersistConfig) IsWitnessAllowed() bool { - return false +// SetAllStoresLimit sets all store limit for a given type and rate. +func (o *PersistConfig) SetAllStoresLimit(typ storelimit.Type, ratePerMin float64) { + v := o.GetScheduleConfig().Clone() + switch typ { + case storelimit.AddPeer: + sc.DefaultStoreLimit.SetDefaultStoreLimit(storelimit.AddPeer, ratePerMin) + for storeID := range v.StoreLimit { + sc := sc.StoreLimitConfig{AddPeer: ratePerMin, RemovePeer: v.StoreLimit[storeID].RemovePeer} + v.StoreLimit[storeID] = sc + } + case storelimit.RemovePeer: + sc.DefaultStoreLimit.SetDefaultStoreLimit(storelimit.RemovePeer, ratePerMin) + for storeID := range v.StoreLimit { + sc := sc.StoreLimitConfig{AddPeer: v.StoreLimit[storeID].AddPeer, RemovePeer: ratePerMin} + v.StoreLimit[storeID] = sc + } + } + + o.SetScheduleConfig(v) } -// IsPlacementRulesCacheEnabled returns if the placement rules cache is enabled. -func (o *PersistConfig) IsPlacementRulesCacheEnabled() bool { +// SetMaxReplicas sets the number of replicas for each region. 
+func (o *PersistConfig) SetMaxReplicas(replicas int) { + v := o.GetReplicationConfig().Clone() + v.MaxReplicas = uint64(replicas) + o.SetReplicationConfig(v) +} + +// IsSchedulerDisabled returns if the scheduler is disabled. +func (o *PersistConfig) IsSchedulerDisabled(t string) bool { + schedulers := o.GetScheduleConfig().Schedulers + for _, s := range schedulers { + if t == s.Type { + return s.Disable + } + } return false } // SetPlacementRulesCacheEnabled sets if the placement rules cache is enabled. -func (o *PersistConfig) SetPlacementRulesCacheEnabled(b bool) {} +func (o *PersistConfig) SetPlacementRulesCacheEnabled(enabled bool) { + v := o.GetReplicationConfig().Clone() + v.EnablePlacementRulesCache = enabled + o.SetReplicationConfig(v) +} // SetEnableWitness sets if the witness is enabled. -func (o *PersistConfig) SetEnableWitness(b bool) {} +func (o *PersistConfig) SetEnableWitness(enable bool) { + v := o.GetScheduleConfig().Clone() + v.EnableWitness = enable + o.SetScheduleConfig(v) +} + +// SetPlacementRuleEnabled set PlacementRuleEnabled +func (o *PersistConfig) SetPlacementRuleEnabled(enabled bool) { + v := o.GetReplicationConfig().Clone() + v.EnablePlacementRules = enabled + o.SetReplicationConfig(v) +} + +// SetSplitMergeInterval to set the interval between finishing split and starting to merge. It's only used to test. +func (o *PersistConfig) SetSplitMergeInterval(splitMergeInterval time.Duration) { + v := o.GetScheduleConfig().Clone() + v.SplitMergeInterval = typeutil.Duration{Duration: splitMergeInterval} + o.SetScheduleConfig(v) +} + +// SetHaltScheduling set HaltScheduling. +func (o *PersistConfig) SetHaltScheduling(halt bool, source string) { + v := o.GetScheduleConfig().Clone() + v.HaltScheduling = halt + o.SetScheduleConfig(v) +} + +// CheckRegionKeys return error if the smallest region's keys is less than mergeKeys +func (o *PersistConfig) CheckRegionKeys(keys, mergeKeys uint64) error { + return o.GetStoreConfig().CheckRegionKeys(keys, mergeKeys) +} + +// CheckRegionSize return error if the smallest region's size is less than mergeSize +func (o *PersistConfig) CheckRegionSize(size, mergeSize uint64) error { + return o.GetStoreConfig().CheckRegionSize(size, mergeSize) +} + +// GetRegionMaxSize returns the max region size in MB +func (o *PersistConfig) GetRegionMaxSize() uint64 { + return o.GetStoreConfig().GetRegionMaxSize() +} + +// GetRegionMaxKeys returns the region split keys +func (o *PersistConfig) GetRegionMaxKeys() uint64 { + return o.GetStoreConfig().GetRegionMaxKeys() +} + +// IsEnableRegionBucket return true if the region bucket is enabled. +func (o *PersistConfig) IsEnableRegionBucket() bool { + return o.GetStoreConfig().IsEnableRegionBucket() +} + +// TODO: implement the following methods + +// AddSchedulerCfg adds the scheduler configurations. +func (o *PersistConfig) AddSchedulerCfg(string, []string) {} + +// CheckLabelProperty checks if the label property is satisfied. +func (o *PersistConfig) CheckLabelProperty(typ string, labels []*metapb.StoreLabel) bool { + return false +} + +// IsTraceRegionFlow returns if the region flow is tracing. +// If the accuracy cannot reach 0.1 MB, it is considered not. +func (o *PersistConfig) IsTraceRegionFlow() bool { + return false +} + +// Persist saves the configuration to the storage. +func (o *PersistConfig) Persist(storage endpoint.ConfigStorage) error { + return nil +} + +// RemoveSchedulerCfg removes the scheduler configurations. 
+func (o *PersistConfig) RemoveSchedulerCfg(tp string) {} diff --git a/pkg/mcs/scheduling/server/config/config_watcher.go b/pkg/mcs/scheduling/server/config/watcher.go similarity index 97% rename from pkg/mcs/scheduling/server/config/config_watcher.go rename to pkg/mcs/scheduling/server/config/watcher.go index fdf957b72c5..81ec4b62f0c 100644 --- a/pkg/mcs/scheduling/server/config/config_watcher.go +++ b/pkg/mcs/scheduling/server/config/watcher.go @@ -42,9 +42,10 @@ type Watcher struct { } type persistedConfig struct { + ClusterVersion semver.Version `json:"cluster-version"` Schedule sc.ScheduleConfig `json:"schedule"` Replication sc.ReplicationConfig `json:"replication"` - ClusterVersion semver.Version `json:"cluster-version"` + Store sc.StoreConfig `json:"store"` } // NewWatcher creates a new watcher to watch the config meta change from PD API server. @@ -71,9 +72,10 @@ func NewWatcher( zap.String("event-kv-key", string(kv.Key)), zap.Error(err)) return err } + cw.SetClusterVersion(&cfg.ClusterVersion) cw.SetScheduleConfig(&cfg.Schedule) cw.SetReplicationConfig(&cfg.Replication) - cw.SetClusterVersion(&cfg.ClusterVersion) + cw.SetStoreConfig(&cfg.Store) return nil } deleteFn := func(kv *mvccpb.KeyValue) error { diff --git a/pkg/mcs/scheduling/server/rule/watcher.go b/pkg/mcs/scheduling/server/rule/watcher.go new file mode 100644 index 00000000000..c85644ff14f --- /dev/null +++ b/pkg/mcs/scheduling/server/rule/watcher.go @@ -0,0 +1,227 @@ +// Copyright 2023 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rule + +import ( + "context" + "sync" + + "github.com/tikv/pd/pkg/storage/endpoint" + "github.com/tikv/pd/pkg/utils/etcdutil" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/mvcc/mvccpb" +) + +// ruleStorage is an in-memory storage for Placement Rules, +// which will implement the `endpoint.RuleStorage` interface. +type ruleStorage struct { + // Rule key -> rule value. + rules sync.Map + // GroupID -> rule group value. + groups sync.Map + // Region rule key -> rule value. + regionRules sync.Map +} + +// LoadRules loads Placement Rules from storage. +func (rs *ruleStorage) LoadRules(f func(k, v string)) error { + rs.rules.Range(func(k, v interface{}) bool { + f(k.(string), v.(string)) + return true + }) + return nil +} + +// SaveRule stores a rule cfg to the rulesPath. +func (rs *ruleStorage) SaveRule(ruleKey string, rule interface{}) error { + rs.rules.Store(ruleKey, rule) + return nil +} + +// DeleteRule removes a rule from storage. +func (rs *ruleStorage) DeleteRule(ruleKey string) error { + rs.rules.Delete(ruleKey) + return nil +} + +// LoadRuleGroups loads all rule groups from storage. +func (rs *ruleStorage) LoadRuleGroups(f func(k, v string)) error { + rs.groups.Range(func(k, v interface{}) bool { + f(k.(string), v.(string)) + return true + }) + return nil +} + +// SaveRuleGroup stores a rule group config to storage. 
+func (rs *ruleStorage) SaveRuleGroup(groupID string, group interface{}) error { + rs.groups.Store(groupID, group) + return nil +} + +// DeleteRuleGroup removes a rule group from storage. +func (rs *ruleStorage) DeleteRuleGroup(groupID string) error { + rs.groups.Delete(groupID) + return nil +} + +// LoadRegionRules loads region rules from storage. +func (rs *ruleStorage) LoadRegionRules(f func(k, v string)) error { + rs.regionRules.Range(func(k, v interface{}) bool { + f(k.(string), v.(string)) + return true + }) + return nil +} + +// SaveRegionRule saves a region rule to the storage. +func (rs *ruleStorage) SaveRegionRule(ruleKey string, rule interface{}) error { + rs.regionRules.Store(ruleKey, rule) + return nil +} + +// DeleteRegionRule removes a region rule from storage. +func (rs *ruleStorage) DeleteRegionRule(ruleKey string) error { + rs.regionRules.Delete(ruleKey) + return nil +} + +// Watcher is used to watch the PD API server for any Placement Rule changes. +type Watcher struct { + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + etcdClient *clientv3.Client + ruleStore *ruleStorage + + ruleWatcher *etcdutil.LoopWatcher + groupWatcher *etcdutil.LoopWatcher + labelWatcher *etcdutil.LoopWatcher +} + +// NewWatcher creates a new watcher to watch the Placement Rule change from PD API server. +// Please use `GetRuleStorage` to get the underlying storage to access the Placement Rules. +func NewWatcher( + ctx context.Context, + etcdClient *clientv3.Client, + // rulePath: + // - Key: /pd/{cluster_id}/rules/{group_id}-{rule_id} + // - Value: placement.Rule + // ruleGroupPath: + // - Key: /pd/{cluster_id}/rule_group/{group_id} + // - Value: placement.RuleGroup + // regionLabelPath: + // - Key: /pd/{cluster_id}/region_label/{rule_id} + // - Value: labeler.LabelRule + rulesPath, ruleGroupPath, regionLabelPath string, +) (*Watcher, error) { + ctx, cancel := context.WithCancel(ctx) + rw := &Watcher{ + ctx: ctx, + cancel: cancel, + etcdClient: etcdClient, + ruleStore: &ruleStorage{}, + } + err := rw.initializeRuleWatcher(rulesPath) + if err != nil { + return nil, err + } + err = rw.initializeGroupWatcher(ruleGroupPath) + if err != nil { + return nil, err + } + err = rw.initializeRegionLabelWatcher(regionLabelPath) + if err != nil { + return nil, err + } + return rw, nil +} + +func (rw *Watcher) initializeRuleWatcher(rulePath string) error { + putFn := func(kv *mvccpb.KeyValue) error { + // Since the PD API server will validate the rule before saving it to etcd, + // so we could directly save the string rule in JSON to the storage here. 
+ return rw.ruleStore.SaveRule(string(kv.Key), string(kv.Value)) + } + deleteFn := func(kv *mvccpb.KeyValue) error { + return rw.ruleStore.DeleteRule(string(kv.Key)) + } + postEventFn := func() error { + return nil + } + rw.ruleWatcher = etcdutil.NewLoopWatcher( + rw.ctx, &rw.wg, + rw.etcdClient, + "scheduling-rule-watcher", rulePath, + putFn, deleteFn, postEventFn, + clientv3.WithPrefix(), + ) + rw.ruleWatcher.StartWatchLoop() + return rw.ruleWatcher.WaitLoad() +} + +func (rw *Watcher) initializeGroupWatcher(ruleGroupPath string) error { + putFn := func(kv *mvccpb.KeyValue) error { + return rw.ruleStore.SaveRuleGroup(string(kv.Key), string(kv.Value)) + } + deleteFn := func(kv *mvccpb.KeyValue) error { + return rw.ruleStore.DeleteRuleGroup(string(kv.Key)) + } + postEventFn := func() error { + return nil + } + rw.groupWatcher = etcdutil.NewLoopWatcher( + rw.ctx, &rw.wg, + rw.etcdClient, + "scheduling-rule-group-watcher", ruleGroupPath, + putFn, deleteFn, postEventFn, + clientv3.WithPrefix(), + ) + rw.groupWatcher.StartWatchLoop() + return rw.groupWatcher.WaitLoad() +} + +func (rw *Watcher) initializeRegionLabelWatcher(regionLabelPath string) error { + putFn := func(kv *mvccpb.KeyValue) error { + return rw.ruleStore.SaveRegionRule(string(kv.Key), string(kv.Value)) + } + deleteFn := func(kv *mvccpb.KeyValue) error { + return rw.ruleStore.DeleteRegionRule(string(kv.Key)) + } + postEventFn := func() error { + return nil + } + rw.labelWatcher = etcdutil.NewLoopWatcher( + rw.ctx, &rw.wg, + rw.etcdClient, + "scheduling-region-label-watcher", regionLabelPath, + putFn, deleteFn, postEventFn, + clientv3.WithPrefix(), + ) + rw.labelWatcher.StartWatchLoop() + return rw.labelWatcher.WaitLoad() +} + +// Close closes the watcher. +func (rw *Watcher) Close() { + rw.cancel() + rw.wg.Wait() +} + +// GetRuleStorage returns the rule storage. +func (rw *Watcher) GetRuleStorage() endpoint.RuleStorage { + return rw.ruleStore +} diff --git a/pkg/mcs/scheduling/server/server.go b/pkg/mcs/scheduling/server/server.go index a1c74ab0c99..845dbe38aa5 100644 --- a/pkg/mcs/scheduling/server/server.go +++ b/pkg/mcs/scheduling/server/server.go @@ -41,8 +41,11 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/mcs/discovery" "github.com/tikv/pd/pkg/mcs/scheduling/server/config" + "github.com/tikv/pd/pkg/mcs/scheduling/server/rule" "github.com/tikv/pd/pkg/mcs/utils" "github.com/tikv/pd/pkg/member" + "github.com/tikv/pd/pkg/schedule" + "github.com/tikv/pd/pkg/schedule/hbstream" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/utils/etcdutil" @@ -71,11 +74,12 @@ type Server struct { serverLoopCancel func() serverLoopWg sync.WaitGroup - cfg *config.Config - name string - clusterID uint64 - listenURL *url.URL - backendUrls []url.URL + cfg *config.Config + name string + clusterID uint64 + listenURL *url.URL + backendUrls []url.URL + persistConfig *config.PersistConfig // etcd client etcdClient *clientv3.Client @@ -105,8 +109,15 @@ type Server struct { serviceID *discovery.ServiceRegistryEntry serviceRegister *discovery.ServiceRegister - cluster *Cluster - storage *endpoint.StorageEndpoint + cluster *Cluster + hbStreams *hbstream.HeartbeatStreams + storage *endpoint.StorageEndpoint + + coordinator *schedule.Coordinator + + // for watching the PD API server meta info updates that are related to the scheduling. + configWatcher *config.Watcher + ruleWatcher *rule.Watcher } // Name returns the unique etcd name for this server in etcd cluster. 
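Reviewer sketch, not part of the patch: the new rule.Watcher mirrors placement rules, rule groups, and region label rules from the PD API server into an in-memory endpoint.RuleStorage. A minimal illustration of how it could be wired up, reusing the ctx/etcdClient/clusterID values that startWatcher below already has; the helper name startRuleWatcher is hypothetical.

// Hypothetical helper, for illustration only.
func startRuleWatcher(ctx context.Context, etcdClient *clientv3.Client, clusterID uint64) (*rule.Watcher, error) {
	w, err := rule.NewWatcher(
		ctx, etcdClient,
		endpoint.RulesPath(clusterID),
		endpoint.RuleGroupPath(clusterID),
		endpoint.RegionLabelPath(clusterID),
	)
	if err != nil {
		return nil, err
	}
	// The watcher keeps rules/groups/labels in memory; a placement.RuleManager
	// can read them through the endpoint.RuleStorage it exposes instead of
	// querying etcd directly.
	_ = w.GetRuleStorage()
	return w, nil
}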
@@ -163,9 +174,11 @@ func (s *Server) primaryElectionLoop() { defer s.serverLoopWg.Done() for { - if s.IsClosed() { - log.Info("server is closed, exit scheduling primary election loop") + select { + case <-s.serverLoopCtx.Done(): + log.Info("server is closed, exit resource manager primary election loop") return + default: } primary, checkAgain := s.participant.CheckLeader() @@ -476,11 +489,18 @@ func (s *Server) startServer() (err error) { s.participant.InitInfo(uniqueName, uniqueID, path.Join(schedulingPrimaryPrefix, fmt.Sprintf("%05d", 0)), utils.PrimaryKey, "primary election", s.cfg.AdvertiseListenAddr) s.storage = endpoint.NewStorageEndpoint( - kv.NewEtcdKVBase(s.etcdClient, endpoint.SchedulingSvcRootPath(s.clusterID)), nil) + kv.NewEtcdKVBase(s.etcdClient, endpoint.PDRootPath(s.clusterID)), nil) s.cluster, err = NewCluster(s.ctx, s.storage, s.cfg) if err != nil { return err } + s.hbStreams = hbstream.NewHeartbeatStreams(s.ctx, s.clusterID, s.cluster.GetBasicCluster()) + s.coordinator = schedule.NewCoordinator(s.ctx, s.cluster, s.hbStreams) + + s.listenURL, err = url.Parse(s.cfg.ListenAddr) + if err != nil { + return err + } tlsConfig, err := s.cfg.Security.ToTLSConfig() if err != nil { return err @@ -494,7 +514,11 @@ func (s *Server) startServer() (err error) { if err != nil { return err } - + err = s.startWatcher() + if err != nil { + return err + } + go s.coordinator.RunUntilStop() serverReadyChan := make(chan struct{}) defer close(serverReadyChan) s.serverLoopWg.Add(1) @@ -524,12 +548,31 @@ func (s *Server) startServer() (err error) { return nil } +func (s *Server) startWatcher() (err error) { + s.configWatcher, err = config.NewWatcher( + s.ctx, s.etcdClient, + endpoint.ConfigPath(s.clusterID), + s.persistConfig, + ) + if err != nil { + return err + } + s.ruleWatcher, err = rule.NewWatcher( + s.ctx, s.etcdClient, + endpoint.RulesPath(s.clusterID), + endpoint.RuleGroupPath(s.clusterID), + endpoint.RegionLabelPath(s.clusterID), + ) + return err +} + // CreateServer creates the Server func CreateServer(ctx context.Context, cfg *config.Config) *Server { svr := &Server{ DiagnosticsServer: sysutil.NewDiagnosticsServer(cfg.Log.File.Filename), startTimestamp: time.Now().Unix(), cfg: cfg, + persistConfig: config.NewPersistConfig(cfg), ctx: ctx, } return svr diff --git a/pkg/mock/mockcluster/config.go b/pkg/mock/mockcluster/config.go index b8c35cb7046..6febba026e8 100644 --- a/pkg/mock/mockcluster/config.go +++ b/pkg/mock/mockcluster/config.go @@ -20,7 +20,6 @@ import ( sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/utils/typeutil" - "github.com/tikv/pd/server/config" ) // SetMaxMergeRegionSize updates the MaxMergeRegionSize configuration. @@ -173,16 +172,16 @@ func (mc *Cluster) SetMaxReplicasWithLabel(enablePlacementRules bool, num int, l // SetRegionMaxSize sets the region max size. func (mc *Cluster) SetRegionMaxSize(v string) { - mc.updateStoreConfig(func(r *config.StoreConfig) { r.RegionMaxSize = v }) + mc.updateStoreConfig(func(r *sc.StoreConfig) { r.RegionMaxSize = v }) } // SetRegionSizeMB sets the region max size. 
func (mc *Cluster) SetRegionSizeMB(v uint64) { - mc.updateStoreConfig(func(r *config.StoreConfig) { r.RegionMaxSizeMB = v }) + mc.updateStoreConfig(func(r *sc.StoreConfig) { r.RegionMaxSizeMB = v }) } -func (mc *Cluster) updateStoreConfig(f func(*config.StoreConfig)) { - r := mc.StoreConfigManager.GetStoreConfig().Clone() +func (mc *Cluster) updateStoreConfig(f func(*sc.StoreConfig)) { + r := mc.PersistOptions.GetStoreConfig().Clone() f(r) mc.SetStoreConfig(r) } diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go index 988a7788d06..ce392d26a39 100644 --- a/pkg/mock/mockcluster/mockcluster.go +++ b/pkg/mock/mockcluster/mockcluster.go @@ -34,6 +34,7 @@ import ( "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" @@ -56,7 +57,6 @@ type Cluster struct { *config.PersistOptions ID uint64 suspectRegions map[uint64]struct{} - *config.StoreConfigManager *buckets.HotBucketCache storage.Storage } @@ -64,15 +64,14 @@ type Cluster struct { // NewCluster creates a new Cluster func NewCluster(ctx context.Context, opts *config.PersistOptions) *Cluster { c := &Cluster{ - ctx: ctx, - BasicCluster: core.NewBasicCluster(), - IDAllocator: mockid.NewIDAllocator(), - HotStat: statistics.NewHotStat(ctx), - HotBucketCache: buckets.NewBucketsCache(ctx), - PersistOptions: opts, - suspectRegions: map[uint64]struct{}{}, - StoreConfigManager: config.NewTestStoreConfigManager(nil), - Storage: storage.NewStorageWithMemoryBackend(), + ctx: ctx, + BasicCluster: core.NewBasicCluster(), + IDAllocator: mockid.NewIDAllocator(), + HotStat: statistics.NewHotStat(ctx), + HotBucketCache: buckets.NewBucketsCache(ctx), + PersistOptions: opts, + suspectRegions: map[uint64]struct{}{}, + Storage: storage.NewStorageWithMemoryBackend(), } if c.PersistOptions.GetReplicationConfig().EnablePlacementRules { c.initRuleManager() @@ -85,22 +84,32 @@ func NewCluster(ctx context.Context, opts *config.PersistOptions) *Cluster { // GetStoreConfig returns the store config. func (mc *Cluster) GetStoreConfig() sc.StoreConfigProvider { - return mc.StoreConfigManager.GetStoreConfig() + return mc.PersistOptions.GetStoreConfig() +} + +// SetRegionBucketEnabled sets the region bucket enabled. +func (mc *Cluster) SetRegionBucketEnabled(enabled bool) { + cfg, ok := mc.GetStoreConfig().(*sc.StoreConfig) + if !ok || cfg == nil { + return + } + cfg.Coprocessor.EnableRegionBucket = enabled + mc.SetStoreConfig(cfg) } // GetCheckerConfig returns the checker config. func (mc *Cluster) GetCheckerConfig() sc.CheckerConfigProvider { - return mc + return mc.PersistOptions } // GetSchedulerConfig returns the scheduler config. func (mc *Cluster) GetSchedulerConfig() sc.SchedulerConfigProvider { - return mc + return mc.PersistOptions } // GetSharedConfig returns the shared config. func (mc *Cluster) GetSharedConfig() sc.SharedConfigProvider { - return mc + return mc.PersistOptions } // GetStorage returns the storage. @@ -113,11 +122,6 @@ func (mc *Cluster) AllocID() (uint64, error) { return mc.IDAllocator.Alloc() } -// GetPersistOptions returns the persist options. -func (mc *Cluster) GetPersistOptions() *config.PersistOptions { - return mc.PersistOptions -} - // UpdateRegionsLabelLevelStats updates the label level stats for the regions. 
func (mc *Cluster) UpdateRegionsLabelLevelStats(regions []*core.RegionInfo) {} @@ -145,7 +149,7 @@ func (mc *Cluster) IsRegionHot(region *core.RegionInfo) bool { } // GetHotPeerStat returns hot peer stat with specified regionID and storeID. -func (mc *Cluster) GetHotPeerStat(rw statistics.RWType, regionID, storeID uint64) *statistics.HotPeerStat { +func (mc *Cluster) GetHotPeerStat(rw utils.RWType, regionID, storeID uint64) *statistics.HotPeerStat { return mc.HotCache.GetHotPeerStat(rw, regionID, storeID) } @@ -153,7 +157,7 @@ func (mc *Cluster) GetHotPeerStat(rw statistics.RWType, regionID, storeID uint64 // The result only includes peers that are hot enough. func (mc *Cluster) RegionReadStats() map[uint64][]*statistics.HotPeerStat { // We directly use threshold for read stats for mockCluster - return mc.HotCache.RegionStats(statistics.Read, mc.GetHotRegionCacheHitsThreshold()) + return mc.HotCache.RegionStats(utils.Read, mc.GetHotRegionCacheHitsThreshold()) } // BucketsStats returns hot region's buckets stats. @@ -168,11 +172,11 @@ func (mc *Cluster) BucketsStats(degree int, regions ...uint64) map[uint64][]*buc // RegionWriteStats returns hot region's write stats. // The result only includes peers that are hot enough. func (mc *Cluster) RegionWriteStats() map[uint64][]*statistics.HotPeerStat { - return mc.HotCache.RegionStats(statistics.Write, mc.GetHotRegionCacheHitsThreshold()) + return mc.HotCache.RegionStats(utils.Write, mc.GetHotRegionCacheHitsThreshold()) } // HotRegionsFromStore picks hot regions in specify store. -func (mc *Cluster) HotRegionsFromStore(store uint64, kind statistics.RWType) []*core.RegionInfo { +func (mc *Cluster) HotRegionsFromStore(store uint64, kind utils.RWType) []*core.RegionInfo { stats := hotRegionsFromStore(mc.HotCache, store, kind, mc.GetHotRegionCacheHitsThreshold()) regions := make([]*core.RegionInfo, 0, len(stats)) for _, stat := range stats { @@ -185,7 +189,7 @@ func (mc *Cluster) HotRegionsFromStore(store uint64, kind statistics.RWType) []* } // hotRegionsFromStore picks hot region in specify store. -func hotRegionsFromStore(w *statistics.HotCache, storeID uint64, kind statistics.RWType, minHotDegree int) []*statistics.HotPeerStat { +func hotRegionsFromStore(w *statistics.HotCache, storeID uint64, kind utils.RWType, minHotDegree int) []*statistics.HotPeerStat { if stats, ok := w.RegionStats(kind, minHotDegree)[storeID]; ok && len(stats) > 0 { return stats } @@ -464,7 +468,7 @@ func (mc *Cluster) AddRegionWithReadInfo( r = r.Clone(core.SetReadKeys(readKeys)) r = r.Clone(core.SetReportInterval(0, reportInterval)) r = r.Clone(core.SetReadQuery(readQuery)) - filledNum := statistics.DefaultAotSize + filledNum := utils.DefaultAotSize if len(filledNums) > 0 { filledNum = filledNums[0] } @@ -473,7 +477,7 @@ func (mc *Cluster) AddRegionWithReadInfo( for i := 0; i < filledNum; i++ { items = mc.CheckRegionRead(r) for _, item := range items { - mc.HotCache.Update(item, statistics.Read) + mc.HotCache.Update(item, utils.Read) } } mc.PutRegion(r) @@ -485,7 +489,7 @@ func (mc *Cluster) AddRegionWithPeerReadInfo(regionID, leaderStoreID, targetStor otherPeerStoreIDs []uint64, filledNums ...int) []*statistics.HotPeerStat { r := mc.newMockRegionInfo(regionID, leaderStoreID, otherPeerStoreIDs...) 
r = r.Clone(core.SetReadBytes(readBytes), core.SetReadKeys(readKeys), core.SetReportInterval(0, reportInterval)) - filledNum := statistics.DefaultAotSize + filledNum := utils.DefaultAotSize if len(filledNums) > 0 { filledNum = filledNums[0] } @@ -494,7 +498,7 @@ func (mc *Cluster) AddRegionWithPeerReadInfo(regionID, leaderStoreID, targetStor items = mc.CheckRegionRead(r) for _, item := range items { if item.StoreID == targetStoreID { - mc.HotCache.Update(item, statistics.Read) + mc.HotCache.Update(item, utils.Read) } } } @@ -513,7 +517,7 @@ func (mc *Cluster) AddRegionLeaderWithReadInfo( r = r.Clone(core.SetReadKeys(readKeys)) r = r.Clone(core.SetReadQuery(readQuery)) r = r.Clone(core.SetReportInterval(0, reportInterval)) - filledNum := statistics.DefaultAotSize + filledNum := utils.DefaultAotSize if len(filledNums) > 0 { filledNum = filledNums[0] } @@ -522,7 +526,7 @@ func (mc *Cluster) AddRegionLeaderWithReadInfo( for i := 0; i < filledNum; i++ { items = mc.CheckRegionLeaderRead(r) for _, item := range items { - mc.HotCache.Update(item, statistics.Read) + mc.HotCache.Update(item, utils.Read) } } mc.PutRegion(r) @@ -541,7 +545,7 @@ func (mc *Cluster) AddLeaderRegionWithWriteInfo( r = r.Clone(core.SetReportInterval(0, reportInterval)) r = r.Clone(core.SetWrittenQuery(writtenQuery)) - filledNum := statistics.DefaultAotSize + filledNum := utils.DefaultAotSize if len(filledNums) > 0 { filledNum = filledNums[0] } @@ -550,7 +554,7 @@ func (mc *Cluster) AddLeaderRegionWithWriteInfo( for i := 0; i < filledNum; i++ { items = mc.CheckRegionWrite(r) for _, item := range items { - mc.HotCache.Update(item, statistics.Write) + mc.HotCache.Update(item, utils.Write) } } mc.PutRegion(r) @@ -734,7 +738,7 @@ func (mc *Cluster) updateStorageStatistics(storeID uint64, update func(*pdpb.Sto newStats := typeutil.DeepClone(store.GetStoreStats(), core.StoreStatsFactory) update(newStats) now := time.Now().Unix() - interval := &pdpb.TimeInterval{StartTimestamp: uint64(now - statistics.StoreHeartBeatReportInterval), EndTimestamp: uint64(now)} + interval := &pdpb.TimeInterval{StartTimestamp: uint64(now - utils.StoreHeartBeatReportInterval), EndTimestamp: uint64(now)} newStats.Interval = interval newStore := store.Clone(core.SetStoreStats(newStats)) mc.Set(storeID, newStats) diff --git a/pkg/replication/replication_mode.go b/pkg/replication/replication_mode.go index fee87e85b5a..5a52f562e60 100644 --- a/pkg/replication/replication_mode.go +++ b/pkg/replication/replication_mode.go @@ -340,25 +340,29 @@ func (m *ModeManager) Run(ctx context.Context) { go func() { defer wg.Done() + ticker := time.NewTicker(tickInterval) + defer ticker.Stop() for { select { - case <-time.After(tickInterval): + case <-ticker.C: + m.tickUpdateState() case <-ctx.Done(): return } - m.tickUpdateState() } }() go func() { defer wg.Done() + ticker := time.NewTicker(replicateStateInterval) + defer ticker.Stop() for { select { - case <-time.After(replicateStateInterval): + case <-ticker.C: + m.tickReplicateStatus() case <-ctx.Done(): return } - m.tickReplicateStatus() } }() diff --git a/pkg/schedule/config/config_provider.go b/pkg/schedule/config/config_provider.go index 3724d62e776..920467a07e2 100644 --- a/pkg/schedule/config/config_provider.go +++ b/pkg/schedule/config/config_provider.go @@ -68,11 +68,15 @@ type SchedulerConfigProvider interface { IsDebugMetricsEnabled() bool IsDiagnosticAllowed() bool GetSlowStoreEvictingAffectedStoreRatioThreshold() float64 + + GetScheduleConfig() *ScheduleConfig + SetScheduleConfig(*ScheduleConfig) } // 
CheckerConfigProvider is the interface for checker configurations. type CheckerConfigProvider interface { SharedConfigProvider + StoreConfigProvider GetSwitchWitnessInterval() time.Duration IsRemoveExtraReplicaEnabled() bool @@ -110,6 +114,7 @@ type SharedConfigProvider interface { GetStoreLimitByType(uint64, storelimit.Type) float64 IsWitnessAllowed() bool IsPlacementRulesCacheEnabled() bool + SetHaltScheduling(bool, string) // for test purpose SetPlacementRulesCacheEnabled(bool) @@ -120,22 +125,19 @@ type SharedConfigProvider interface { type ConfProvider interface { SchedulerConfigProvider CheckerConfigProvider + StoreConfigProvider // for test purpose SetPlacementRuleEnabled(bool) SetSplitMergeInterval(time.Duration) SetMaxReplicas(int) - SetAllStoresLimit(typ storelimit.Type, ratePerMin float64) - // only for store configuration - UseRaftV2() + SetAllStoresLimit(storelimit.Type, float64) } // StoreConfigProvider is the interface that wraps the StoreConfigProvider related methods. type StoreConfigProvider interface { GetRegionMaxSize() uint64 + GetRegionMaxKeys() uint64 CheckRegionSize(uint64, uint64) error CheckRegionKeys(uint64, uint64) error IsEnableRegionBucket() bool - IsRaftKV2() bool - // for test purpose - SetRegionBucketEnabled(bool) } diff --git a/server/config/store_config.go b/pkg/schedule/config/store_config.go similarity index 59% rename from server/config/store_config.go rename to pkg/schedule/config/store_config.go index a468ebae0ae..0d773d56471 100644 --- a/server/config/store_config.go +++ b/pkg/schedule/config/store_config.go @@ -16,16 +16,10 @@ package config import ( "encoding/json" - "fmt" - "io" - "net/http" "reflect" - "sync/atomic" "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/slice" - "github.com/tikv/pd/pkg/utils/netutil" "github.com/tikv/pd/pkg/utils/typeutil" "go.uber.org/zap" ) @@ -41,8 +35,8 @@ var ( defaultRegionMaxKey = uint64(1440000) // default region split key is 960000 defaultRegionSplitKey = uint64(960000) - - raftStoreV2 = "raft-kv2" + // RaftstoreV2 is the v2 raftstore engine mark. + RaftstoreV2 = "raft-kv2" ) // StoreConfig is the config of store like TiKV. @@ -74,6 +68,16 @@ type Coprocessor struct { RegionBucketSize string `json:"region-bucket-size"` } +// Adjust adjusts the config to calculate some fields. +func (c *StoreConfig) Adjust() { + if c == nil { + return + } + c.RegionMaxSizeMB = typeutil.ParseMBFromText(c.RegionMaxSize, defaultRegionMaxSize) + c.RegionSplitSizeMB = typeutil.ParseMBFromText(c.RegionSplitSize, defaultRegionSplitSize) + c.RegionBucketSizeMB = typeutil.ParseMBFromText(c.RegionBucketSize, defaultBucketSize) +} + // Equal returns true if the two configs are equal. func (c *StoreConfig) Equal(other *StoreConfig) bool { return reflect.DeepEqual(c.Coprocessor, other.Coprocessor) && reflect.DeepEqual(c.Storage, other.Storage) @@ -133,7 +137,7 @@ func (c *StoreConfig) IsRaftKV2() bool { if c == nil { return false } - return c.Storage.Engine == raftStoreV2 + return c.Storage.Engine == RaftstoreV2 } // SetRegionBucketEnabled sets if the region bucket is enabled. @@ -189,135 +193,3 @@ func (c *StoreConfig) Clone() *StoreConfig { cfg := *c return &cfg } - -// StoreConfigManager is used to manage the store config. -type StoreConfigManager struct { - config atomic.Value - source Source -} - -// NewStoreConfigManager creates a new StoreConfigManager. 
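// Illustrative sketch (standalone) of the timer change made in
// pkg/replication/replication_mode.go above: a single time.Ticker, stopped on
// exit, replaces a fresh time.After timer on every loop iteration, and the
// periodic work moves inside the ticker case so a cancelled context leaves the
// loop without running one extra tick.
package main

import (
	"context"
	"fmt"
	"time"
)

func runTicks(ctx context.Context, interval time.Duration, tick func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			tick()
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
	defer cancel()
	ticks := 0
	runTicks(ctx, 50*time.Millisecond, func() { ticks++ })
	fmt.Println("ticks:", ticks)
}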
-func NewStoreConfigManager(client *http.Client) *StoreConfigManager { - schema := "http" - if netutil.IsEnableHTTPS(client) { - schema = "https" - } - - manager := &StoreConfigManager{ - source: newTiKVConfigSource(schema, client), - } - manager.config.Store(&StoreConfig{}) - return manager -} - -// NewTestStoreConfigManager creates a new StoreConfigManager for test. -func NewTestStoreConfigManager(whiteList []string) *StoreConfigManager { - manager := &StoreConfigManager{ - source: newFakeSource(whiteList), - } - manager.config.Store(&StoreConfig{}) - return manager -} - -// ObserveConfig is used to observe the config change. -// switchRaftV2 is true if the new config's raft engine is v2 and the old is v1. -func (m *StoreConfigManager) ObserveConfig(address string) (switchRaftV2 bool, err error) { - cfg, err := m.source.GetConfig(address) - if err != nil { - return switchRaftV2, err - } - old := m.GetStoreConfig() - if cfg != nil && !old.Equal(cfg) { - log.Info("sync the store config successful", zap.String("store-address", address), zap.String("store-config", cfg.String()), zap.String("old-config", old.String())) - switchRaftV2 = m.update(cfg) - } - return switchRaftV2, nil -} - -// update returns true if the new config's raft engine is v2 and the old is v1 -func (m *StoreConfigManager) update(cfg *StoreConfig) (switchRaftV2 bool) { - cfg.RegionMaxSizeMB = typeutil.ParseMBFromText(cfg.RegionMaxSize, defaultRegionMaxSize) - cfg.RegionSplitSizeMB = typeutil.ParseMBFromText(cfg.RegionSplitSize, defaultRegionSplitSize) - cfg.RegionBucketSizeMB = typeutil.ParseMBFromText(cfg.RegionBucketSize, defaultBucketSize) - - config := m.config.Load().(*StoreConfig) - switchRaftV2 = config.Storage.Engine != raftStoreV2 && cfg.Storage.Engine == raftStoreV2 - m.config.Store(cfg) - return -} - -// GetStoreConfig returns the current store configuration. -func (m *StoreConfigManager) GetStoreConfig() *StoreConfig { - if m == nil { - return nil - } - config := m.config.Load() - return config.(*StoreConfig) -} - -// SetStoreConfig sets the store configuration. -func (m *StoreConfigManager) SetStoreConfig(cfg *StoreConfig) { - if m == nil { - return - } - m.config.Store(cfg) -} - -// Source is used to get the store config. -type Source interface { - GetConfig(statusAddress string) (*StoreConfig, error) -} - -// TiKVConfigSource is used to get the store config from TiKV. -type TiKVConfigSource struct { - schema string - client *http.Client -} - -func newTiKVConfigSource(schema string, client *http.Client) *TiKVConfigSource { - return &TiKVConfigSource{ - schema: schema, - client: client, - } -} - -// GetConfig returns the store config from TiKV. -func (s TiKVConfigSource) GetConfig(statusAddress string) (*StoreConfig, error) { - url := fmt.Sprintf("%s://%s/config", s.schema, statusAddress) - resp, err := s.client.Get(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - var cfg StoreConfig - if err := json.Unmarshal(body, &cfg); err != nil { - return nil, err - } - return &cfg, nil -} - -// FakeSource is used to test. -type FakeSource struct { - whiteList []string -} - -func newFakeSource(whiteList []string) *FakeSource { - return &FakeSource{ - whiteList: whiteList, - } -} - -// GetConfig returns the config. 
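// Illustrative sketch (standalone, simplified): the atomic.Value hot-swap used
// by the StoreConfigManager that this change deletes. Readers load a complete
// config snapshot and writers store a replacement, so the read path needs no
// lock. After this diff the mock cluster reaches the store config through
// PersistOptions instead of such a manager.
package main

import (
	"fmt"
	"sync/atomic"
)

type StoreConfig struct {
	Engine string
}

type configHolder struct {
	config atomic.Value
}

func (h *configHolder) Get() *StoreConfig  { return h.config.Load().(*StoreConfig) }
func (h *configHolder) Set(c *StoreConfig) { h.config.Store(c) }

func main() {
	h := &configHolder{}
	h.Set(&StoreConfig{Engine: "raft-kv"})
	h.Set(&StoreConfig{Engine: "raft-kv2"}) // e.g. after syncing a TiKV config change
	fmt.Println(h.Get().Engine)
}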
-func (f *FakeSource) GetConfig(url string) (*StoreConfig, error) { - if !slice.Contains(f.whiteList, url) { - return nil, fmt.Errorf("[url:%s] is not in white list", url) - } - config := &StoreConfig{} - config.RegionMaxSize = "10MiB" - config.Storage.Engine = raftStoreV2 - return config, nil -} diff --git a/pkg/schedule/config/store_config_test.go b/pkg/schedule/config/store_config_test.go new file mode 100644 index 00000000000..6db222d4fb0 --- /dev/null +++ b/pkg/schedule/config/store_config_test.go @@ -0,0 +1,70 @@ +// Copyright 2022 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMergeCheck(t *testing.T) { + re := require.New(t) + testdata := []struct { + size uint64 + mergeSize uint64 + keys uint64 + mergeKeys uint64 + pass bool + }{{ + // case 1: the merged region size is smaller than the max region size + size: 96 + 20, + mergeSize: 20, + keys: 1440000 + 200000, + mergeKeys: 200000, + pass: true, + }, { + // case 2: the smallest region is 68MiB, it can't be merged again. + size: 144 + 20, + mergeSize: 20, + keys: 1440000 + 200000, + mergeKeys: 200000, + pass: true, + }, { + // case 3: the smallest region is 50MiB, it can be merged again. + size: 144 + 2, + mergeSize: 50, + keys: 1440000 + 20000, + mergeKeys: 500000, + pass: false, + }, { + // case4: the smallest region is 51MiB, it can't be merged again. 
+ size: 144 + 3, + mergeSize: 50, + keys: 1440000 + 30000, + mergeKeys: 500000, + pass: true, + }} + config := &StoreConfig{} + for _, v := range testdata { + if v.pass { + re.NoError(config.CheckRegionSize(v.size, v.mergeSize)) + re.NoError(config.CheckRegionKeys(v.keys, v.mergeKeys)) + } else { + re.Error(config.CheckRegionSize(v.size, v.mergeSize)) + re.Error(config.CheckRegionKeys(v.keys, v.mergeKeys)) + } + } +} diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go index 16d72ff913b..58fd3093299 100644 --- a/pkg/schedule/coordinator.go +++ b/pkg/schedule/coordinator.go @@ -37,6 +37,7 @@ import ( "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/schedule/splitter" "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/syncutil" "go.uber.org/zap" @@ -88,14 +89,15 @@ func NewCoordinator(ctx context.Context, cluster sche.ClusterInformer, hbStreams ctx, cancel := context.WithCancel(ctx) opController := operator.NewController(ctx, cluster.GetBasicCluster(), cluster.GetSharedConfig(), hbStreams) schedulers := schedulers.NewController(ctx, cluster, cluster.GetStorage(), opController) + checkers := checker.NewController(ctx, cluster, cluster.GetCheckerConfig(), cluster.GetRuleManager(), cluster.GetRegionLabeler(), opController) return &Coordinator{ ctx: ctx, cancel: cancel, cluster: cluster, prepareChecker: newPrepareChecker(), - checkers: checker.NewController(ctx, cluster, cluster.GetCheckerConfig(), cluster.GetRuleManager(), cluster.GetRegionLabeler(), opController), - regionScatterer: scatter.NewRegionScatterer(ctx, cluster, opController), - regionSplitter: splitter.NewRegionSplitter(cluster, splitter.NewSplitRegionsHandler(cluster, opController)), + checkers: checkers, + regionScatterer: scatter.NewRegionScatterer(ctx, cluster, opController, checkers.AddSuspectRegions), + regionSplitter: splitter.NewRegionSplitter(cluster, splitter.NewSplitRegionsHandler(cluster, opController), checkers.AddSuspectRegions), schedulers: schedulers, opController: opController, hbStreams: hbStreams, @@ -167,7 +169,7 @@ func (c *Coordinator) PatrolRegions() { } func (c *Coordinator) isSchedulingHalted() bool { - return c.cluster.GetPersistOptions().IsSchedulingHalted() + return c.cluster.GetSchedulerConfig().IsSchedulingHalted() } func (c *Coordinator) checkRegions(startKey []byte) (key []byte, regions []*core.RegionInfo) { @@ -374,7 +376,7 @@ func (c *Coordinator) initSchedulers() { log.Fatal("cannot load schedulers' config", errs.ZapError(err)) } - scheduleCfg := c.cluster.GetPersistOptions().GetScheduleConfig().Clone() + scheduleCfg := c.cluster.GetSchedulerConfig().GetScheduleConfig().Clone() // The new way to create scheduler with the independent configuration. for i, name := range scheduleNames { data := configs[i] @@ -433,8 +435,8 @@ func (c *Coordinator) initSchedulers() { // Removes the invalid scheduler config and persist. scheduleCfg.Schedulers = scheduleCfg.Schedulers[:k] - c.cluster.GetPersistOptions().SetScheduleConfig(scheduleCfg) - if err := c.cluster.GetPersistOptions().Persist(c.cluster.GetStorage()); err != nil { + c.cluster.GetSchedulerConfig().SetScheduleConfig(scheduleCfg) + if err := c.cluster.GetSchedulerConfig().Persist(c.cluster.GetStorage()); err != nil { log.Error("cannot persist schedule config", errs.ZapError(err)) } } @@ -503,18 +505,18 @@ func (c *Coordinator) Stop() { } // GetHotRegionsByType gets hot regions' statistics by RWType. 
-func (c *Coordinator) GetHotRegionsByType(typ statistics.RWType) *statistics.StoreHotPeersInfos { +func (c *Coordinator) GetHotRegionsByType(typ utils.RWType) *statistics.StoreHotPeersInfos { isTraceFlow := c.cluster.GetSchedulerConfig().IsTraceRegionFlow() storeLoads := c.cluster.GetStoresLoads() stores := c.cluster.GetStores() var infos *statistics.StoreHotPeersInfos switch typ { - case statistics.Write: + case utils.Write: regionStats := c.cluster.RegionWriteStats() - infos = statistics.GetHotStatus(stores, storeLoads, regionStats, statistics.Write, isTraceFlow) - case statistics.Read: + infos = statistics.GetHotStatus(stores, storeLoads, regionStats, utils.Write, isTraceFlow) + case utils.Read: regionStats := c.cluster.RegionReadStats() - infos = statistics.GetHotStatus(stores, storeLoads, regionStats, statistics.Read, isTraceFlow) + infos = statistics.GetHotStatus(stores, storeLoads, regionStats, utils.Read, isTraceFlow) default: } // update params `IsLearner` and `LastUpdateTime` @@ -528,11 +530,11 @@ func (c *Coordinator) GetHotRegionsByType(typ statistics.RWType) *statistics.Sto h.IsLearner = core.IsLearner(region.GetPeer(h.StoreID)) } switch typ { - case statistics.Write: + case utils.Write: if region != nil { h.LastUpdateTime = time.Unix(int64(region.GetInterval().GetEndTimestamp()), 0) } - case statistics.Read: + case utils.Read: store := c.cluster.GetStore(h.StoreID) if store != nil { ts := store.GetMeta().GetLastHeartbeat() @@ -555,24 +557,24 @@ func (c *Coordinator) GetWaitGroup() *sync.WaitGroup { func (c *Coordinator) CollectHotSpotMetrics() { stores := c.cluster.GetStores() // Collects hot write region metrics. - collectHotMetrics(c.cluster, stores, statistics.Write) + collectHotMetrics(c.cluster, stores, utils.Write) // Collects hot read region metrics. 
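// Illustrative sketch (standalone, simplified stand-ins): the read/write
// dispatch that GetHotRegionsByType and collectHotMetrics perform, with the
// RWType kind now imported from the leaf package pkg/statistics/utils instead
// of pkg/statistics, which keeps the shared constants in a small package that
// both schedulers and statistics code can depend on.
package main

import "fmt"

type RWType int

const (
	Write RWType = iota
	Read
)

func (rw RWType) String() string {
	if rw == Write {
		return "write"
	}
	return "read"
}

// regionStatsSource names the stats a caller would collect for each kind,
// mirroring the switch on utils.Read / utils.Write above.
func regionStatsSource(typ RWType) string {
	switch typ {
	case Read:
		return "RegionReadStats"
	case Write:
		return "RegionWriteStats"
	}
	return "unknown"
}

func main() {
	fmt.Println(Read.String(), "->", regionStatsSource(Read))
	fmt.Println(Write.String(), "->", regionStatsSource(Write))
}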
- collectHotMetrics(c.cluster, stores, statistics.Read) + collectHotMetrics(c.cluster, stores, utils.Read) } -func collectHotMetrics(cluster sche.ClusterInformer, stores []*core.StoreInfo, typ statistics.RWType) { +func collectHotMetrics(cluster sche.ClusterInformer, stores []*core.StoreInfo, typ utils.RWType) { var ( kind string regionStats map[uint64][]*statistics.HotPeerStat ) switch typ { - case statistics.Read: + case utils.Read: regionStats = cluster.RegionReadStats() - kind = statistics.Read.String() - case statistics.Write: + kind = utils.Read.String() + case utils.Write: regionStats = cluster.RegionWriteStats() - kind = statistics.Write.String() + kind = utils.Write.String() } status := statistics.CollectHotPeerInfos(stores, regionStats) // only returns TotalBytesRate,TotalKeysRate,TotalQueryRate,Count @@ -608,8 +610,8 @@ func collectHotMetrics(cluster sche.ClusterInformer, stores []*core.StoreInfo, t } if !hasHotLeader && !hasHotPeer { - statistics.ForeachRegionStats(func(rwTy statistics.RWType, dim int, _ statistics.RegionStatKind) { - schedulers.HotPendingSum.DeleteLabelValues(storeLabel, rwTy.String(), statistics.DimToString(dim)) + utils.ForeachRegionStats(func(rwTy utils.RWType, dim int, _ utils.RegionStatKind) { + schedulers.HotPendingSum.DeleteLabelValues(storeLabel, rwTy.String(), utils.DimToString(dim)) }) } } diff --git a/pkg/schedule/core/cluster_informer.go b/pkg/schedule/core/cluster_informer.go index 388d9dcf3cd..63dacd0c30d 100644 --- a/pkg/schedule/core/cluster_informer.go +++ b/pkg/schedule/core/cluster_informer.go @@ -22,18 +22,15 @@ import ( "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/server/config" ) // ClusterInformer provides the necessary information of a cluster. 
type ClusterInformer interface { SchedulerCluster CheckerCluster - ScatterCluster GetStorage() storage.Storage UpdateRegionsLabelLevelStats(regions []*core.RegionInfo) - GetPersistOptions() *config.PersistOptions } // SchedulerCluster is an aggregate interface that wraps multiple interfaces @@ -56,13 +53,6 @@ type CheckerCluster interface { GetStoreConfig() sc.StoreConfigProvider } -// ScatterCluster is an aggregate interface that wraps multiple interfaces -type ScatterCluster interface { - SharedCluster - - AddSuspectRegions(ids ...uint64) -} - // SharedCluster is an aggregate interface that wraps multiple interfaces type SharedCluster interface { BasicCluster diff --git a/pkg/schedule/labeler/labeler.go b/pkg/schedule/labeler/labeler.go index e80a75fc904..c525ac5c44f 100644 --- a/pkg/schedule/labeler/labeler.go +++ b/pkg/schedule/labeler/labeler.go @@ -16,7 +16,6 @@ package labeler import ( "context" - "encoding/json" "strings" "time" @@ -107,18 +106,19 @@ func (l *RegionLabeler) checkAndClearExpiredLabels() { func (l *RegionLabeler) loadRules() error { var toDelete []string err := l.storage.LoadRegionRules(func(k, v string) { - var r LabelRule - if err := json.Unmarshal([]byte(v), &r); err != nil { + r, err := NewLabelRuleFromJSON([]byte(v)) + if err != nil { log.Error("failed to unmarshal label rule value", zap.String("rule-key", k), zap.String("rule-value", v), errs.ZapError(errs.ErrLoadRule)) toDelete = append(toDelete, k) return } - if err := r.checkAndAdjust(); err != nil { + err = r.checkAndAdjust() + if err != nil { log.Error("failed to adjust label rule", zap.String("rule-key", k), zap.String("rule-value", v), zap.Error(err)) toDelete = append(toDelete, k) return } - l.labelRules[r.ID] = &r + l.labelRules[r.ID] = r }) if err != nil { return err @@ -298,7 +298,7 @@ func (l *RegionLabeler) GetRegionLabel(region *core.RegionInfo, key string) stri // ScheduleDisabled returns true if the region is lablelld with schedule-disabled. func (l *RegionLabeler) ScheduleDisabled(region *core.RegionInfo) bool { v := l.GetRegionLabel(region, scheduleOptionLabel) - return strings.EqualFold(v, scheduleOptioonValueDeny) + return strings.EqualFold(v, scheduleOptionValueDeny) } // GetRegionLabels returns the labels of the region. @@ -335,3 +335,12 @@ func (l *RegionLabeler) GetRegionLabels(region *core.RegionInfo) []*RegionLabel } return result } + +// MakeKeyRanges is a helper function to make key ranges. 
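// Illustrative sketch (standalone): what the newly exported
// labeler.MakeKeyRanges helper builds, a slice of {"start_key", "end_key"}
// maps consumed by key-range label rules. The helper body below is copied from
// the diff; only the surrounding program is added for demonstration.
package main

import (
	"encoding/json"
	"fmt"
)

// MakeKeyRanges is a helper function to make key ranges.
func MakeKeyRanges(keys ...string) []interface{} {
	var res []interface{}
	for i := 0; i < len(keys); i += 2 {
		res = append(res, map[string]interface{}{"start_key": keys[i], "end_key": keys[i+1]})
	}
	return res
}

func main() {
	data, _ := json.Marshal(MakeKeyRanges("1234", "5678", "ab12", "cd12"))
	fmt.Println(string(data))
	// [{"end_key":"5678","start_key":"1234"},{"end_key":"cd12","start_key":"ab12"}]
}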
+func MakeKeyRanges(keys ...string) []interface{} { + var res []interface{} + for i := 0; i < len(keys); i += 2 { + res = append(res, map[string]interface{}{"start_key": keys[i], "end_key": keys[i+1]}) + } + return res +} diff --git a/pkg/schedule/labeler/labeler_test.go b/pkg/schedule/labeler/labeler_test.go index e308f3307ad..f38c6321c01 100644 --- a/pkg/schedule/labeler/labeler_test.go +++ b/pkg/schedule/labeler/labeler_test.go @@ -38,7 +38,7 @@ func TestAdjustRule(t *testing.T) { {Key: "k1", Value: "v1"}, }, RuleType: "key-range", - Data: makeKeyRanges("12abcd", "34cdef", "56abcd", "78cdef"), + Data: MakeKeyRanges("12abcd", "34cdef", "56abcd", "78cdef"), } err := rule.checkAndAdjust() re.NoError(err) @@ -94,9 +94,9 @@ func TestGetSetRule(t *testing.T) { labeler, err := NewRegionLabeler(context.Background(), store, time.Millisecond*10) re.NoError(err) rules := []*LabelRule{ - {ID: "rule1", Labels: []RegionLabel{{Key: "k1", Value: "v1"}}, RuleType: "key-range", Data: makeKeyRanges("1234", "5678")}, - {ID: "rule2", Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: makeKeyRanges("ab12", "cd12")}, - {ID: "rule3", Labels: []RegionLabel{{Key: "k3", Value: "v3"}}, RuleType: "key-range", Data: makeKeyRanges("abcd", "efef")}, + {ID: "rule1", Labels: []RegionLabel{{Key: "k1", Value: "v1"}}, RuleType: "key-range", Data: MakeKeyRanges("1234", "5678")}, + {ID: "rule2", Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: MakeKeyRanges("ab12", "cd12")}, + {ID: "rule3", Labels: []RegionLabel{{Key: "k3", Value: "v3"}}, RuleType: "key-range", Data: MakeKeyRanges("abcd", "efef")}, } for _, r := range rules { err := labeler.SetLabelRule(r) @@ -121,7 +121,7 @@ func TestGetSetRule(t *testing.T) { // patch patch := LabelRulePatch{ SetRules: []*LabelRule{ - {ID: "rule2", Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: makeKeyRanges("ab12", "cd12")}, + {ID: "rule2", Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: MakeKeyRanges("ab12", "cd12")}, }, DeleteRules: []string{"rule1"}, } @@ -140,10 +140,10 @@ func TestIndex(t *testing.T) { labeler, err := NewRegionLabeler(context.Background(), store, time.Millisecond*10) re.NoError(err) rules := []*LabelRule{ - {ID: "rule0", Labels: []RegionLabel{{Key: "k1", Value: "v0"}}, RuleType: "key-range", Data: makeKeyRanges("", "")}, - {ID: "rule1", Index: 1, Labels: []RegionLabel{{Key: "k1", Value: "v1"}}, RuleType: "key-range", Data: makeKeyRanges("1234", "5678")}, - {ID: "rule2", Index: 2, Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: makeKeyRanges("ab12", "cd12")}, - {ID: "rule3", Index: 1, Labels: []RegionLabel{{Key: "k2", Value: "v3"}}, RuleType: "key-range", Data: makeKeyRanges("abcd", "efef")}, + {ID: "rule0", Labels: []RegionLabel{{Key: "k1", Value: "v0"}}, RuleType: "key-range", Data: MakeKeyRanges("", "")}, + {ID: "rule1", Index: 1, Labels: []RegionLabel{{Key: "k1", Value: "v1"}}, RuleType: "key-range", Data: MakeKeyRanges("1234", "5678")}, + {ID: "rule2", Index: 2, Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: MakeKeyRanges("ab12", "cd12")}, + {ID: "rule3", Index: 1, Labels: []RegionLabel{{Key: "k2", Value: "v3"}}, RuleType: "key-range", Data: MakeKeyRanges("abcd", "efef")}, } for _, r := range rules { err := labeler.SetLabelRule(r) @@ -182,9 +182,9 @@ func TestSaveLoadRule(t *testing.T) { labeler, err := NewRegionLabeler(context.Background(), store, time.Millisecond*10) re.NoError(err) 
rules := []*LabelRule{ - {ID: "rule1", Labels: []RegionLabel{{Key: "k1", Value: "v1"}}, RuleType: "key-range", Data: makeKeyRanges("1234", "5678")}, - {ID: "rule2", Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: makeKeyRanges("ab12", "cd12")}, - {ID: "rule3", Labels: []RegionLabel{{Key: "k3", Value: "v3"}}, RuleType: "key-range", Data: makeKeyRanges("abcd", "efef")}, + {ID: "rule1", Labels: []RegionLabel{{Key: "k1", Value: "v1"}}, RuleType: "key-range", Data: MakeKeyRanges("1234", "5678")}, + {ID: "rule2", Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: MakeKeyRanges("ab12", "cd12")}, + {ID: "rule3", Labels: []RegionLabel{{Key: "k3", Value: "v3"}}, RuleType: "key-range", Data: MakeKeyRanges("abcd", "efef")}, } for _, r := range rules { err := labeler.SetLabelRule(r) @@ -227,9 +227,9 @@ func TestKeyRange(t *testing.T) { labeler, err := NewRegionLabeler(context.Background(), store, time.Millisecond*10) re.NoError(err) rules := []*LabelRule{ - {ID: "rule1", Labels: []RegionLabel{{Key: "k1", Value: "v1"}}, RuleType: "key-range", Data: makeKeyRanges("1234", "5678")}, - {ID: "rule2", Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: makeKeyRanges("ab12", "cd12")}, - {ID: "rule3", Labels: []RegionLabel{{Key: "k3", Value: "v3"}}, RuleType: "key-range", Data: makeKeyRanges("abcd", "efef")}, + {ID: "rule1", Labels: []RegionLabel{{Key: "k1", Value: "v1"}}, RuleType: "key-range", Data: MakeKeyRanges("1234", "5678")}, + {ID: "rule2", Labels: []RegionLabel{{Key: "k2", Value: "v2"}}, RuleType: "key-range", Data: MakeKeyRanges("ab12", "cd12")}, + {ID: "rule3", Labels: []RegionLabel{{Key: "k3", Value: "v3"}}, RuleType: "key-range", Data: MakeKeyRanges("abcd", "efef")}, } for _, r := range rules { err := labeler.SetLabelRule(r) @@ -274,7 +274,7 @@ func TestLabelerRuleTTL(t *testing.T) { {Key: "k1", Value: "v1"}, }, RuleType: "key-range", - Data: makeKeyRanges("1234", "5678")}, + Data: MakeKeyRanges("1234", "5678")}, { ID: "rule2", Labels: []RegionLabel{ @@ -282,13 +282,13 @@ func TestLabelerRuleTTL(t *testing.T) { }, RuleType: "key-range", - Data: makeKeyRanges("1234", "5678")}, + Data: MakeKeyRanges("1234", "5678")}, { ID: "rule3", Labels: []RegionLabel{{Key: "k3", Value: "v3", TTL: "1h"}}, RuleType: "key-range", - Data: makeKeyRanges("1234", "5678")}, + Data: MakeKeyRanges("1234", "5678")}, } start, _ := hex.DecodeString("1234") @@ -354,7 +354,7 @@ func TestGC(t *testing.T) { ID: fmt.Sprintf("rule%d", id), Labels: labels, RuleType: "key-range", - Data: makeKeyRanges("1234", "5678")} + Data: MakeKeyRanges("1234", "5678")} err := labeler.SetLabelRule(rule) re.NoError(err) } @@ -380,11 +380,3 @@ func TestGC(t *testing.T) { labeler.RUnlock() re.LessOrEqual(currentRuleLen, 5) } - -func makeKeyRanges(keys ...string) []interface{} { - var res []interface{} - for i := 0; i < len(keys); i += 2 { - res = append(res, map[string]interface{}{"start_key": keys[i], "end_key": keys[i+1]}) - } - return res -} diff --git a/pkg/schedule/labeler/rules.go b/pkg/schedule/labeler/rules.go index 3b50779d659..5726a9f904e 100644 --- a/pkg/schedule/labeler/rules.go +++ b/pkg/schedule/labeler/rules.go @@ -17,6 +17,7 @@ package labeler import ( "bytes" "encoding/hex" + "encoding/json" "fmt" "reflect" "time" @@ -48,14 +49,24 @@ type LabelRule struct { minExpire *time.Time } +// NewLabelRuleFromJSON creates a label rule from the JSON data. 
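// Illustrative sketch (standalone, simplified stand-in struct): the
// NewXxxFromJSON constructor pattern this diff introduces for LabelRule, Rule
// and RuleGroup, so callers get a pointer plus an error instead of
// unmarshalling into a local value themselves. Field names and JSON tags here
// are assumptions for the demo, not the real definitions.
package main

import (
	"encoding/json"
	"fmt"
)

type LabelRule struct {
	ID       string `json:"id"`
	RuleType string `json:"rule_type"`
}

func NewLabelRuleFromJSON(data []byte) (*LabelRule, error) {
	lr := &LabelRule{}
	if err := json.Unmarshal(data, lr); err != nil {
		return nil, err
	}
	return lr, nil
}

func main() {
	r, err := NewLabelRuleFromJSON([]byte(`{"id":"rule1","rule_type":"key-range"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(r.ID, r.RuleType)
}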
+func NewLabelRuleFromJSON(data []byte) (*LabelRule, error) { + lr := &LabelRule{} + err := json.Unmarshal(data, lr) + if err != nil { + return nil, err + } + return lr, nil +} + const ( // KeyRange is the rule type that specifies a list of key ranges. KeyRange = "key-range" ) const ( - scheduleOptionLabel = "schedule" - scheduleOptioonValueDeny = "deny" + scheduleOptionLabel = "schedule" + scheduleOptionValueDeny = "deny" ) // KeyRangeRule contains the start key and end key of the LabelRule. diff --git a/pkg/schedule/placement/rule.go b/pkg/schedule/placement/rule.go index 5b8b488c6af..7e57866512d 100644 --- a/pkg/schedule/placement/rule.go +++ b/pkg/schedule/placement/rule.go @@ -73,6 +73,15 @@ type Rule struct { group *RuleGroup // only set at runtime, no need to {,un}marshal or persist. } +// NewRuleFromJSON creates a rule from the JSON data. +func NewRuleFromJSON(data []byte) (*Rule, error) { + r := &Rule{} + if err := json.Unmarshal(data, r); err != nil { + return nil, err + } + return r, nil +} + func (r *Rule) String() string { b, _ := json.Marshal(r) return string(b) @@ -112,6 +121,15 @@ type RuleGroup struct { Override bool `json:"override,omitempty"` } +// NewRuleGroupFromJSON creates a rule group from the JSON data. +func NewRuleGroupFromJSON(data []byte) (*RuleGroup, error) { + rg := &RuleGroup{} + if err := json.Unmarshal(data, rg); err != nil { + return nil, err + } + return rg, nil +} + func (g *RuleGroup) isDefault() bool { return g.Index == 0 && !g.Override } diff --git a/pkg/schedule/placement/rule_manager.go b/pkg/schedule/placement/rule_manager.go index 0237e0d1fed..3bd272a00ac 100644 --- a/pkg/schedule/placement/rule_manager.go +++ b/pkg/schedule/placement/rule_manager.go @@ -135,18 +135,20 @@ func (m *RuleManager) loadRules() error { var toSave []*Rule var toDelete []string err := m.storage.LoadRules(func(k, v string) { - var r Rule - if err := json.Unmarshal([]byte(v), &r); err != nil { + r, err := NewRuleFromJSON([]byte(v)) + if err != nil { log.Error("failed to unmarshal rule value", zap.String("rule-key", k), zap.String("rule-value", v), errs.ZapError(errs.ErrLoadRule)) toDelete = append(toDelete, k) return } - if err := m.adjustRule(&r, ""); err != nil { + err = m.adjustRule(r, "") + if err != nil { log.Error("rule is in bad format", zap.String("rule-key", k), zap.String("rule-value", v), errs.ZapError(errs.ErrLoadRule, err)) toDelete = append(toDelete, k) return } - if _, ok := m.ruleConfig.rules[r.Key()]; ok { + _, ok := m.ruleConfig.rules[r.Key()] + if ok { log.Error("duplicated rule key", zap.String("rule-key", k), zap.String("rule-value", v), errs.ZapError(errs.ErrLoadRule)) toDelete = append(toDelete, k) return @@ -154,9 +156,9 @@ func (m *RuleManager) loadRules() error { if k != r.StoreKey() { log.Error("mismatch data key, need to restore", zap.String("rule-key", k), zap.String("rule-value", v), errs.ZapError(errs.ErrLoadRule)) toDelete = append(toDelete, k) - toSave = append(toSave, &r) + toSave = append(toSave, r) } - m.ruleConfig.rules[r.Key()] = &r + m.ruleConfig.rules[r.Key()] = r }) if err != nil { return err @@ -176,12 +178,12 @@ func (m *RuleManager) loadRules() error { func (m *RuleManager) loadGroups() error { return m.storage.LoadRuleGroups(func(k, v string) { - var g RuleGroup - if err := json.Unmarshal([]byte(v), &g); err != nil { + g, err := NewRuleGroupFromJSON([]byte(v)) + if err != nil { log.Error("failed to unmarshal rule group", zap.String("group-id", k), errs.ZapError(errs.ErrLoadRuleGroup, err)) return } - m.ruleConfig.groups[g.ID] = 
&g + m.ruleConfig.groups[g.ID] = g }) } diff --git a/pkg/schedule/scatter/region_scatterer.go b/pkg/schedule/scatter/region_scatterer.go index 75b06316294..c31461eb06f 100644 --- a/pkg/schedule/scatter/region_scatterer.go +++ b/pkg/schedule/scatter/region_scatterer.go @@ -136,22 +136,24 @@ func (s *selectedStores) getDistributionByGroupLocked(group string) (map[uint64] // RegionScatterer scatters regions. type RegionScatterer struct { - ctx context.Context - name string - cluster sche.ScatterCluster - ordinaryEngine engineContext - specialEngines sync.Map - opController *operator.Controller + ctx context.Context + name string + cluster sche.SharedCluster + ordinaryEngine engineContext + specialEngines sync.Map + opController *operator.Controller + addSuspectRegions func(regionIDs ...uint64) } // NewRegionScatterer creates a region scatterer. // RegionScatter is used for the `Lightning`, it will scatter the specified regions before import data. -func NewRegionScatterer(ctx context.Context, cluster sche.ScatterCluster, opController *operator.Controller) *RegionScatterer { +func NewRegionScatterer(ctx context.Context, cluster sche.SharedCluster, opController *operator.Controller, addSuspectRegions func(regionIDs ...uint64)) *RegionScatterer { return &RegionScatterer{ - ctx: ctx, - name: regionScatterName, - cluster: cluster, - opController: opController, + ctx: ctx, + name: regionScatterName, + cluster: cluster, + opController: opController, + addSuspectRegions: addSuspectRegions, ordinaryEngine: newEngineContext(ctx, func() filter.Filter { return filter.NewEngineFilter(regionScatterName, filter.NotSpecialEngines) }), @@ -283,7 +285,7 @@ func (r *RegionScatterer) scatterRegions(regions map[uint64]*core.RegionInfo, fa // in a group level instead of cluster level. func (r *RegionScatterer) Scatter(region *core.RegionInfo, group string, skipStoreLimit bool) (*operator.Operator, error) { if !filter.IsRegionReplicated(r.cluster, region) { - r.cluster.AddSuspectRegions(region.GetID()) + r.addSuspectRegions(region.GetID()) scatterSkipNotReplicatedCounter.Inc() log.Warn("region not replicated during scatter", zap.Uint64("region-id", region.GetID())) return nil, errors.Errorf("region %d is not fully replicated", region.GetID()) diff --git a/pkg/schedule/scatter/region_scatterer_test.go b/pkg/schedule/scatter/region_scatterer_test.go index f4ee5953e6f..c0724e481f6 100644 --- a/pkg/schedule/scatter/region_scatterer_test.go +++ b/pkg/schedule/scatter/region_scatterer_test.go @@ -105,7 +105,7 @@ func scatter(re *require.Assertions, numStores, numRegions uint64, useRules bool // region distributed in same stores. 
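// Illustrative sketch (standalone, simplified stand-in types): the callback
// injection that NewRegionScatterer switches to above. Instead of requiring a
// wider ScatterCluster interface with AddSuspectRegions, the scatterer now
// receives the suspect-region hook as a plain function, which the coordinator
// wires to checkers.AddSuspectRegions.
package main

import "fmt"

type scatterer struct {
	addSuspectRegions func(regionIDs ...uint64)
}

func newScatterer(addSuspectRegions func(regionIDs ...uint64)) *scatterer {
	return &scatterer{addSuspectRegions: addSuspectRegions}
}

func (s *scatterer) scatter(regionID uint64, replicated bool) error {
	if !replicated {
		// Hand the region back to the checker instead of scattering it.
		s.addSuspectRegions(regionID)
		return fmt.Errorf("region %d is not fully replicated", regionID)
	}
	return nil
}

func main() {
	suspects := map[uint64]struct{}{}
	s := newScatterer(func(ids ...uint64) {
		for _, id := range ids {
			suspects[id] = struct{}{}
		}
	})
	fmt.Println(s.scatter(1, false), "suspects:", len(suspects))
}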
tc.AddLeaderRegion(i, 1, 2, 3) } - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) for i := uint64(1); i <= numRegions; i++ { region := tc.GetRegion(i) @@ -175,7 +175,7 @@ func scatterSpecial(re *require.Assertions, numOrdinaryStores, numSpecialStores, []uint64{numOrdinaryStores + 1, numOrdinaryStores + 2, numOrdinaryStores + 3}, ) } - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) for i := uint64(1); i <= numRegions; i++ { region := tc.GetRegion(i) @@ -242,7 +242,7 @@ func TestStoreLimit(t *testing.T) { tc.AddLeaderRegion(i, seq.next(), seq.next(), seq.next()) } - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) for i := uint64(1); i <= 5; i++ { region := tc.GetRegion(i) @@ -287,7 +287,7 @@ func TestScatterCheck(t *testing.T) { } for _, testCase := range testCases { t.Log(testCase.name) - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) _, err := scatterer.Scatter(testCase.checkRegion, "", false) if testCase.needFix { re.Error(err) @@ -328,7 +328,7 @@ func TestSomeStoresFilteredScatterGroupInConcurrency(t *testing.T) { tc.SetStoreLastHeartbeatInterval(i, 40*time.Minute) } re.Equal(tc.GetStore(uint64(6)).IsDisconnected(), true) - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) var wg sync.WaitGroup for j := 0; j < 10; j++ { wg.Add(1) @@ -382,7 +382,7 @@ func TestScatterGroupInConcurrency(t *testing.T) { // We send scatter interweave request for each group to simulate scattering multiple region groups in concurrency. for _, testCase := range testCases { t.Log(testCase.name) - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) regionID := 1 for i := 0; i < 100; i++ { for j := 0; j < testCase.groupCount; j++ { @@ -433,7 +433,7 @@ func TestScatterForManyRegion(t *testing.T) { tc.SetStoreLastHeartbeatInterval(i, -10*time.Minute) } - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) regions := make(map[uint64]*core.RegionInfo) for i := 1; i <= 1200; i++ { regions[uint64(i)] = tc.AddLightWeightLeaderRegion(uint64(i), 1, 2, 3) @@ -475,7 +475,7 @@ func TestScattersGroup(t *testing.T) { for id, testCase := range testCases { group := fmt.Sprintf("gourp-%d", id) t.Log(testCase.name) - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) regions := map[uint64]*core.RegionInfo{} for i := 1; i <= 100; i++ { regions[uint64(i)] = tc.AddLightWeightLeaderRegion(uint64(i), 1, 2, 3) @@ -548,7 +548,7 @@ func TestRegionFromDifferentGroups(t *testing.T) { for i := uint64(1); i <= uint64(storeCount); i++ { tc.AddRegionStore(i, 0) } - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) regionCount := 50 for i := 1; i <= regionCount; i++ { p := rand.Perm(storeCount) @@ -614,7 +614,7 @@ func TestRegionHasLearner(t *testing.T) { }, }, }) - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) regionCount := 50 for i := 1; i <= regionCount; i++ { _, err := scatterer.Scatter(tc.AddRegionWithLearner(uint64(i), uint64(1), []uint64{uint64(2), uint64(3)}, []uint64{7}), "group", false) @@ -674,7 
+674,7 @@ func TestSelectedStoresTooFewPeers(t *testing.T) { tc.SetStoreLastHeartbeatInterval(i, -10*time.Minute) } group := "group" - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) // Put a lot of regions in Store 1/2/3. for i := uint64(1); i < 100; i++ { @@ -711,7 +711,7 @@ func TestSelectedStoresTooManyPeers(t *testing.T) { tc.SetStoreLastHeartbeatInterval(i, -10*time.Minute) } group := "group" - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) // priority 4 > 1 > 5 > 2 == 3 for i := 0; i < 1200; i++ { scatterer.ordinaryEngine.selectedPeer.Put(2, group) @@ -749,7 +749,7 @@ func TestBalanceRegion(t *testing.T) { tc.SetStoreLastHeartbeatInterval(i, -10*time.Minute) } group := "group" - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) for i := uint64(1001); i <= 1300; i++ { region := tc.AddLeaderRegion(i, 2, 4, 6) op := scatterer.scatterRegion(region, group, false) @@ -807,7 +807,7 @@ func TestRemoveStoreLimit(t *testing.T) { tc.AddLeaderRegion(i, seq.next(), seq.next(), seq.next()) } - scatterer := NewRegionScatterer(ctx, tc, oc) + scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddSuspectRegions) for i := uint64(1); i <= 5; i++ { region := tc.GetRegion(i) diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index d0c0cc9a1be..4c8051de677 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -32,6 +32,7 @@ import ( "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -234,7 +235,7 @@ func (s *grantHotRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun return s.dispatch(rw, cluster), nil } -func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster sche.SchedulerCluster) []*operator.Operator { +func (s *grantHotRegionScheduler) dispatch(typ utils.RWType, cluster sche.SchedulerCluster) []*operator.Operator { stLoadInfos := s.stLoadInfos[buildResourceType(typ, constant.RegionKind)] infos := make([]*statistics.StoreLoadDetail, len(stLoadInfos)) index := 0 @@ -243,7 +244,7 @@ func (s *grantHotRegionScheduler) dispatch(typ statistics.RWType, cluster sche.S index++ } sort.Slice(infos, func(i, j int) bool { - return infos[i].LoadPred.Current.Loads[statistics.ByteDim] > infos[j].LoadPred.Current.Loads[statistics.ByteDim] + return infos[i].LoadPred.Current.Loads[utils.ByteDim] > infos[j].LoadPred.Current.Loads[utils.ByteDim] }) return s.randomSchedule(cluster, infos) } diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index 1100e980843..f7bd27cface 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -37,6 +37,7 @@ import ( "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/keyutil" "github.com/tikv/pd/pkg/utils/syncutil" "go.uber.org/zap" @@ -99,7 +100,7 @@ type baseHotScheduler struct { // this records regionID which have pending Operator by operation type. 
During filterHotPeers, the hot peers won't // be selected if its owner region is tracked in this attribute. regionPendings map[uint64]*pendingInfluence - types []statistics.RWType + types []utils.RWType r *rand.Rand updateReadTime time.Time updateWriteTime time.Time @@ -109,9 +110,9 @@ func newBaseHotScheduler(opController *operator.Controller) *baseHotScheduler { base := NewBaseScheduler(opController) ret := &baseHotScheduler{ BaseScheduler: base, - types: []statistics.RWType{statistics.Write, statistics.Read}, + types: []utils.RWType{utils.Write, utils.Read}, regionPendings: make(map[uint64]*pendingInfluence), - stHistoryLoads: statistics.NewStoreHistoryLoads(statistics.DimLen), + stHistoryLoads: statistics.NewStoreHistoryLoads(utils.DimLen), r: rand.New(rand.NewSource(time.Now().UnixNano())), } for ty := resourceType(0); ty < resourceTypeLen; ty++ { @@ -122,7 +123,7 @@ func newBaseHotScheduler(opController *operator.Controller) *baseHotScheduler { // prepareForBalance calculate the summary of pending Influence for each store and prepare the load detail for // each store, only update read or write load detail -func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster sche.SchedulerCluster) { +func (h *baseHotScheduler) prepareForBalance(rw utils.RWType, cluster sche.SchedulerCluster) { h.stInfos = statistics.SummaryStoreInfos(cluster.GetStores()) h.summaryPendingInfluence() h.storesLoads = cluster.GetStoresLoads() @@ -139,7 +140,7 @@ func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster sche. rw, resource) } switch rw { - case statistics.Read: + case utils.Read: // update read statistics if time.Since(h.updateReadTime) >= statisticsInterval { regionRead := cluster.RegionReadStats() @@ -147,7 +148,7 @@ func (h *baseHotScheduler) prepareForBalance(rw statistics.RWType, cluster sche. 
prepare(regionRead, constant.RegionKind) h.updateReadTime = time.Now() } - case statistics.Write: + case utils.Write: // update write statistics if time.Since(h.updateWriteTime) >= statisticsInterval { regionWrite := cluster.RegionWriteStats() @@ -185,8 +186,8 @@ func (h *baseHotScheduler) summaryPendingInfluence() { for storeID, info := range h.stInfos { storeLabel := strconv.FormatUint(storeID, 10) if infl := info.PendingSum; infl != nil { - statistics.ForeachRegionStats(func(rwTy statistics.RWType, dim int, kind statistics.RegionStatKind) { - setHotPendingInfluenceMetrics(storeLabel, rwTy.String(), statistics.DimToString(dim), infl.Loads[kind]) + utils.ForeachRegionStats(func(rwTy utils.RWType, dim int, kind utils.RegionStatKind) { + setHotPendingInfluenceMetrics(storeLabel, rwTy.String(), utils.DimToString(dim), infl.Loads[kind]) }) } } @@ -197,7 +198,7 @@ func setHotPendingInfluenceMetrics(storeLabel, rwTy, dim string, load float64) { HotPendingSum.WithLabelValues(storeLabel, rwTy, dim).Set(load) } -func (h *baseHotScheduler) randomRWType() statistics.RWType { +func (h *baseHotScheduler) randomRWType() utils.RWType { return h.types[h.r.Int()%len(h.types)] } @@ -285,7 +286,7 @@ func (h *hotScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]* return h.dispatch(rw, cluster), nil } -func (h *hotScheduler) dispatch(typ statistics.RWType, cluster sche.SchedulerCluster) []*operator.Operator { +func (h *hotScheduler) dispatch(typ utils.RWType, cluster sche.SchedulerCluster) []*operator.Operator { h.Lock() defer h.Unlock() h.prepareForBalance(typ, cluster) @@ -294,9 +295,9 @@ func (h *hotScheduler) dispatch(typ statistics.RWType, cluster sche.SchedulerClu return nil } switch typ { - case statistics.Read: + case utils.Read: return h.balanceHotReadRegions(cluster) - case statistics.Write: + case utils.Write: return h.balanceHotWriteRegions(cluster) } return nil @@ -313,16 +314,16 @@ func (h *hotScheduler) tryAddPendingInfluence(op *operator.Operator, srcStore [] influence := newPendingInfluence(op, srcStore, dstStore, infl, maxZombieDur) h.regionPendings[regionID] = influence - statistics.ForeachRegionStats(func(rwTy statistics.RWType, dim int, kind statistics.RegionStatKind) { - hotPeerHist.WithLabelValues(h.GetName(), rwTy.String(), statistics.DimToString(dim)).Observe(infl.Loads[kind]) + utils.ForeachRegionStats(func(rwTy utils.RWType, dim int, kind utils.RegionStatKind) { + hotPeerHist.WithLabelValues(h.GetName(), rwTy.String(), utils.DimToString(dim)).Observe(infl.Loads[kind]) }) return true } func (h *hotScheduler) balanceHotReadRegions(cluster sche.SchedulerCluster) []*operator.Operator { - leaderSolver := newBalanceSolver(h, cluster, statistics.Read, transferLeader) + leaderSolver := newBalanceSolver(h, cluster, utils.Read, transferLeader) leaderOps := leaderSolver.solve() - peerSolver := newBalanceSolver(h, cluster, statistics.Read, movePeer) + peerSolver := newBalanceSolver(h, cluster, utils.Read, movePeer) peerOps := peerSolver.solve() if len(leaderOps) == 0 && len(peerOps) == 0 { hotSchedulerSkipCounter.Inc() @@ -367,7 +368,7 @@ func (h *hotScheduler) balanceHotWriteRegions(cluster sche.SchedulerCluster) []* s := h.r.Intn(100) switch { case s < int(schedulePeerPr*100): - peerSolver := newBalanceSolver(h, cluster, statistics.Write, movePeer) + peerSolver := newBalanceSolver(h, cluster, utils.Write, movePeer) ops := peerSolver.solve() if len(ops) > 0 && peerSolver.tryAddPendingInfluence() { return ops @@ -375,7 +376,7 @@ func (h *hotScheduler) 
balanceHotWriteRegions(cluster sche.SchedulerCluster) []* default: } - leaderSolver := newBalanceSolver(h, cluster, statistics.Write, transferLeader) + leaderSolver := newBalanceSolver(h, cluster, utils.Write, transferLeader) ops := leaderSolver.solve() if len(ops) > 0 && leaderSolver.tryAddPendingInfluence() { return ops @@ -429,7 +430,7 @@ func (s *solution) getPendingLoad(dim int) (src float64, dst float64) { // calcPeersRate precomputes the peer rate and stores it in cachedPeersRate. func (s *solution) calcPeersRate(dims ...int) { - s.cachedPeersRate = make([]float64, statistics.DimLen) + s.cachedPeersRate = make([]float64, utils.DimLen) for _, dim := range dims { peersRate := s.mainPeerStat.GetLoad(dim) if s.revertPeerStat != nil { @@ -456,7 +457,7 @@ type balanceSolver struct { stLoadDetail map[uint64]*statistics.StoreLoadDetail filteredHotPeers map[uint64][]*statistics.HotPeerStat // storeID -> hotPeers(filtered) nthHotPeer map[uint64][]*statistics.HotPeerStat // storeID -> [dimLen]hotPeers - rwTy statistics.RWType + rwTy utils.RWType opTy opType resourceTy resourceType @@ -511,15 +512,15 @@ func (bs *balanceSolver) init() { // Init store load detail according to the type. bs.stLoadDetail = bs.sche.stLoadInfos[bs.resourceTy] - bs.maxSrc = &statistics.StoreLoad{Loads: make([]float64, statistics.DimLen)} + bs.maxSrc = &statistics.StoreLoad{Loads: make([]float64, utils.DimLen)} bs.minDst = &statistics.StoreLoad{ - Loads: make([]float64, statistics.DimLen), + Loads: make([]float64, utils.DimLen), Count: math.MaxFloat64, } for i := range bs.minDst.Loads { bs.minDst.Loads[i] = math.MaxFloat64 } - maxCur := &statistics.StoreLoad{Loads: make([]float64, statistics.DimLen)} + maxCur := &statistics.StoreLoad{Loads: make([]float64, utils.DimLen)} bs.filteredHotPeers = make(map[uint64][]*statistics.HotPeerStat) bs.nthHotPeer = make(map[uint64][]*statistics.HotPeerStat) @@ -527,15 +528,15 @@ func (bs *balanceSolver) init() { bs.maxSrc = statistics.MaxLoad(bs.maxSrc, detail.LoadPred.Min()) bs.minDst = statistics.MinLoad(bs.minDst, detail.LoadPred.Max()) maxCur = statistics.MaxLoad(maxCur, &detail.LoadPred.Current) - bs.nthHotPeer[detail.GetID()] = make([]*statistics.HotPeerStat, statistics.DimLen) + bs.nthHotPeer[detail.GetID()] = make([]*statistics.HotPeerStat, utils.DimLen) bs.filteredHotPeers[detail.GetID()] = bs.filterHotPeers(detail) } rankStepRatios := []float64{ - statistics.ByteDim: bs.sche.conf.GetByteRankStepRatio(), - statistics.KeyDim: bs.sche.conf.GetKeyRankStepRatio(), - statistics.QueryDim: bs.sche.conf.GetQueryRateRankStepRatio()} - stepLoads := make([]float64, statistics.DimLen) + utils.ByteDim: bs.sche.conf.GetByteRankStepRatio(), + utils.KeyDim: bs.sche.conf.GetKeyRankStepRatio(), + utils.QueryDim: bs.sche.conf.GetQueryRateRankStepRatio()} + stepLoads := make([]float64, utils.DimLen) for i := range stepLoads { stepLoads[i] = maxCur.Loads[i] * rankStepRatios[i] } @@ -590,7 +591,7 @@ func (bs *balanceSolver) getPriorities() []string { return []string{} } -func newBalanceSolver(sche *hotScheduler, cluster sche.SchedulerCluster, rwTy statistics.RWType, opTy opType) *balanceSolver { +func newBalanceSolver(sche *hotScheduler, cluster sche.SchedulerCluster, rwTy utils.RWType, opTy opType) *balanceSolver { bs := &balanceSolver{ SchedulerCluster: cluster, sche: sche, @@ -717,7 +718,7 @@ func (bs *balanceSolver) solve() []*operator.Operator { } func (bs *balanceSolver) skipCounter(label string) prometheus.Counter { - if bs.rwTy == statistics.Read { + if bs.rwTy == utils.Read { switch 
label { case "byte": return readSkipByteDimUniformStoreCounter @@ -785,7 +786,7 @@ func (bs *balanceSolver) tryAddPendingInfluence() bool { } func (bs *balanceSolver) collectPendingInfluence(peer *statistics.HotPeerStat) statistics.Influence { - infl := statistics.Influence{Loads: make([]float64, statistics.RegionStatCount), Count: 1} + infl := statistics.Influence{Loads: make([]float64, utils.RegionStatCount), Count: 1} bs.rwTy.SetFullLoadRates(infl.Loads, peer.GetLoads()) inverse := bs.rwTy.Inverse() another := bs.GetHotPeerStat(inverse, peer.RegionID, peer.StoreID) @@ -800,7 +801,7 @@ func (bs *balanceSolver) collectPendingInfluence(peer *statistics.HotPeerStat) s func (bs *balanceSolver) calcMaxZombieDur() time.Duration { switch bs.resourceTy { case writeLeader: - if bs.firstPriority == statistics.QueryDim { + if bs.firstPriority == utils.QueryDim { // We use store query info rather than total of hot write leader to guide hot write leader scheduler // when its first priority is `QueryDim`, because `Write-peer` does not have `QueryDim`. // The reason is the same with `tikvCollector.GetLoads`. @@ -829,7 +830,7 @@ func (bs *balanceSolver) filterSrcStores() map[uint64]*statistics.StoreLoadDetai if !confEnableForTiFlash { continue } - if bs.rwTy != statistics.Write || bs.opTy != movePeer { + if bs.rwTy != utils.Write || bs.opTy != movePeer { continue } srcToleranceRatio += tiflashToleranceRatioCorrection @@ -998,7 +999,7 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*statistics.StoreLoadDetai srcStore := bs.cur.srcStore.StoreInfo switch bs.opTy { case movePeer: - if bs.rwTy == statistics.Read && bs.cur.mainPeerStat.IsLeader() { // for hot-read scheduler, only move peer + if bs.rwTy == utils.Read && bs.cur.mainPeerStat.IsLeader() { // for hot-read scheduler, only move peer return nil } filters = []filter.Filter{ @@ -1019,7 +1020,7 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*statistics.StoreLoadDetai &filter.StoreStateFilter{ActionScope: bs.sche.GetName(), TransferLeader: true, OperatorLevel: constant.High}, filter.NewSpecialUseFilter(bs.sche.GetName(), filter.SpecialUseHotRegion), } - if bs.rwTy == statistics.Read { + if bs.rwTy == utils.Read { peers := bs.cur.region.GetPeers() moveLeaderFilters := []filter.Filter{&filter.StoreStateFilter{ActionScope: bs.sche.GetName(), MoveRegion: true, OperatorLevel: constant.High}} if leaderFilter := filter.NewPlacementLeaderSafeguard(bs.sche.GetName(), bs.GetSchedulerConfig(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore, true /*allowMoveLeader*/); leaderFilter != nil { @@ -1069,7 +1070,7 @@ func (bs *balanceSolver) pickDstStores(filters []filter.Filter, candidates []*st if !confEnableForTiFlash { continue } - if bs.rwTy != statistics.Write || bs.opTy != movePeer { + if bs.rwTy != utils.Write || bs.opTy != movePeer { continue } dstToleranceRatio += tiflashToleranceRatioCorrection @@ -1269,11 +1270,11 @@ func (bs *balanceSolver) isNotWorsened(dim int) bool { func (bs *balanceSolver) getMinRate(dim int) float64 { switch dim { - case statistics.KeyDim: + case utils.KeyDim: return bs.sche.conf.GetMinHotKeyRate() - case statistics.ByteDim: + case utils.ByteDim: return bs.sche.conf.GetMinHotByteRate() - case statistics.QueryDim: + case utils.QueryDim: return bs.sche.conf.GetMinHotQueryRate() } return -1 @@ -1340,10 +1341,10 @@ func (bs *balanceSolver) betterThanV1(old *solution) bool { return false } -var dimToStep = [statistics.DimLen]float64{ - statistics.ByteDim: 100, - statistics.KeyDim: 10, - 
statistics.QueryDim: 10, +var dimToStep = [utils.DimLen]float64{ + utils.ByteDim: 100, + utils.KeyDim: 10, + utils.QueryDim: 10, } func (bs *balanceSolver) getRkCmpPrioritiesV1(old *solution) (firstCmp int, secondCmp int) { @@ -1484,9 +1485,9 @@ func (bs *balanceSolver) buildOperators() (ops []*operator.Operator) { var createOperator func(region *core.RegionInfo, srcStoreID, dstStoreID uint64) (op *operator.Operator, typ string, err error) switch bs.rwTy { - case statistics.Read: + case utils.Read: createOperator = bs.createReadOperator - case statistics.Write: + case utils.Write: createOperator = bs.createWriteOperator } @@ -1514,17 +1515,17 @@ func (bs *balanceSolver) buildOperators() (ops []*operator.Operator) { // bucketFirstStat returns the first priority statistics of the bucket. // if the first priority is query rate, it will return the second priority . -func (bs *balanceSolver) bucketFirstStat() statistics.RegionStatKind { - base := statistics.RegionReadBytes - if bs.rwTy == statistics.Write { - base = statistics.RegionWriteBytes +func (bs *balanceSolver) bucketFirstStat() utils.RegionStatKind { + base := utils.RegionReadBytes + if bs.rwTy == utils.Write { + base = utils.RegionWriteBytes } offset := bs.firstPriority // todo: remove it if bucket's qps has been supported. - if bs.firstPriority == statistics.QueryDim { + if bs.firstPriority == utils.QueryDim { offset = bs.secondPriority } - return base + statistics.RegionStatKind(offset) + return base + utils.RegionStatKind(offset) } func (bs *balanceSolver) splitBucketsOperator(region *core.RegionInfo, keys [][]byte) *operator.Operator { @@ -1540,7 +1541,7 @@ func (bs *balanceSolver) splitBucketsOperator(region *core.RegionInfo, keys [][] return nil } desc := splitHotReadBuckets - if bs.rwTy == statistics.Write { + if bs.rwTy == utils.Write { desc = splitHotWriteBuckets } @@ -1850,16 +1851,16 @@ func (ty resourceType) String() string { } } -func toResourceType(rwTy statistics.RWType, opTy opType) resourceType { +func toResourceType(rwTy utils.RWType, opTy opType) resourceType { switch rwTy { - case statistics.Write: + case utils.Write: switch opTy { case movePeer: return writePeer case transferLeader: return writeLeader } - case statistics.Read: + case utils.Read: switch opTy { case movePeer: return readPeer @@ -1870,16 +1871,16 @@ func toResourceType(rwTy statistics.RWType, opTy opType) resourceType { panic(fmt.Sprintf("invalid arguments for toResourceType: rwTy = %v, opTy = %v", rwTy, opTy)) } -func buildResourceType(rwTy statistics.RWType, ty constant.ResourceKind) resourceType { +func buildResourceType(rwTy utils.RWType, ty constant.ResourceKind) resourceType { switch rwTy { - case statistics.Write: + case utils.Write: switch ty { case constant.RegionKind: return writePeer case constant.LeaderKind: return writeLeader } - case statistics.Read: + case utils.Read: switch ty { case constant.RegionKind: return readPeer @@ -1892,24 +1893,24 @@ func buildResourceType(rwTy statistics.RWType, ty constant.ResourceKind) resourc func stringToDim(name string) int { switch name { - case statistics.BytePriority: - return statistics.ByteDim - case statistics.KeyPriority: - return statistics.KeyDim - case statistics.QueryPriority: - return statistics.QueryDim + case utils.BytePriority: + return utils.ByteDim + case utils.KeyPriority: + return utils.KeyDim + case utils.QueryPriority: + return utils.QueryDim } - return statistics.ByteDim + return utils.ByteDim } func dimToString(dim int) string { switch dim { - case statistics.ByteDim: - return 
statistics.BytePriority - case statistics.KeyDim: - return statistics.KeyPriority - case statistics.QueryDim: - return statistics.QueryPriority + case utils.ByteDim: + return utils.BytePriority + case utils.KeyDim: + return utils.KeyPriority + case utils.QueryDim: + return utils.QueryPriority default: return "" } diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go index 61a9942e339..ae7f05c562f 100644 --- a/pkg/schedule/schedulers/hot_region_config.go +++ b/pkg/schedule/schedulers/hot_region_config.go @@ -26,7 +26,7 @@ import ( "github.com/tikv/pd/pkg/errs" sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/slice" - "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/reflectutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -43,16 +43,16 @@ const ( ) var defaultPrioritiesConfig = prioritiesConfig{ - read: []string{statistics.QueryPriority, statistics.BytePriority}, - writeLeader: []string{statistics.QueryPriority, statistics.BytePriority}, - writePeer: []string{statistics.BytePriority, statistics.KeyPriority}, + read: []string{utils.QueryPriority, utils.BytePriority}, + writeLeader: []string{utils.QueryPriority, utils.BytePriority}, + writePeer: []string{utils.BytePriority, utils.KeyPriority}, } // because tikv below 5.2.0 does not report query information, we will use byte and key as the scheduling dimensions var compatiblePrioritiesConfig = prioritiesConfig{ - read: []string{statistics.BytePriority, statistics.KeyPriority}, - writeLeader: []string{statistics.KeyPriority, statistics.BytePriority}, - writePeer: []string{statistics.BytePriority, statistics.KeyPriority}, + read: []string{utils.BytePriority, utils.KeyPriority}, + writeLeader: []string{utils.KeyPriority, utils.BytePriority}, + writePeer: []string{utils.BytePriority, utils.KeyPriority}, } // params about hot region. 
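Since the priority constants now live in pkg/statistics/utils, the fallback for clusters that cannot report query statistics is easiest to see in isolation. The sketch below is a hypothetical, simplified stand-in for the adjustPrioritiesConfig logic that appears later in this file's diff, using only the exported utils constants; it is not the PD implementation itself.

package main

import (
	"fmt"

	"github.com/tikv/pd/pkg/statistics/utils"
)

// pickPriorities is a hypothetical, simplified helper: when the cluster cannot
// report query statistics (TiKV < 5.2.0) and the wanted priorities mention the
// query dimension, fall back to the byte/key "compatible" priorities.
func pickPriorities(querySupport bool, wanted, compatibles []string) []string {
	if querySupport {
		return wanted
	}
	for _, p := range wanted {
		if p == utils.QueryPriority {
			return compatibles
		}
	}
	return wanted
}

func main() {
	wanted := []string{utils.QueryPriority, utils.BytePriority}
	compatibles := []string{utils.BytePriority, utils.KeyPriority}
	fmt.Println(pickPriorities(false, wanted, compatibles)) // [byte key]
	fmt.Println(pickPriorities(true, wanted, compatibles))  // [query byte]
}

The real adjustPrioritiesConfig additionally validates the configured priorities and falls back to the defaults when they are malformed, as the hunks further down show.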
@@ -158,13 +158,13 @@ func (conf *hotRegionSchedulerConfig) EncodeConfig() ([]byte, error) { func (conf *hotRegionSchedulerConfig) GetStoreStatZombieDuration() time.Duration { conf.RLock() defer conf.RUnlock() - return time.Duration(conf.MaxZombieRounds*statistics.StoreHeartBeatReportInterval) * time.Second + return time.Duration(conf.MaxZombieRounds*utils.StoreHeartBeatReportInterval) * time.Second } func (conf *hotRegionSchedulerConfig) GetRegionsStatZombieDuration() time.Duration { conf.RLock() defer conf.RUnlock() - return time.Duration(conf.MaxZombieRounds*statistics.RegionHeartBeatReportInterval) * time.Second + return time.Duration(conf.MaxZombieRounds*utils.RegionHeartBeatReportInterval) * time.Second } func (conf *hotRegionSchedulerConfig) GetMaxPeerNumber() int { @@ -314,7 +314,7 @@ func (conf *hotRegionSchedulerConfig) getRankFormulaVersionLocked() string { } } -func (conf *hotRegionSchedulerConfig) IsForbidRWType(rw statistics.RWType) bool { +func (conf *hotRegionSchedulerConfig) IsForbidRWType(rw utils.RWType) bool { conf.RLock() defer conf.RUnlock() return rw.String() == conf.ForbidRWType @@ -328,7 +328,7 @@ func (conf *hotRegionSchedulerConfig) getSplitThresholds() float64 { func (conf *hotRegionSchedulerConfig) getForbidRWTypeLocked() string { switch conf.ForbidRWType { - case statistics.Read.String(), statistics.Write.String(): + case utils.Read.String(), utils.Write.String(): return conf.ForbidRWType default: return "" @@ -352,7 +352,7 @@ func (conf *hotRegionSchedulerConfig) handleGetConfig(w http.ResponseWriter, r * func isPriorityValid(priorities []string) (map[string]bool, error) { priorityMap := map[string]bool{} for _, p := range priorities { - if p != statistics.BytePriority && p != statistics.KeyPriority && p != statistics.QueryPriority { + if p != utils.BytePriority && p != utils.KeyPriority && p != utils.QueryPriority { return nil, errs.ErrSchedulerConfig.FastGenByArgs("invalid scheduling dimensions") } priorityMap[p] = true @@ -375,7 +375,7 @@ func (conf *hotRegionSchedulerConfig) valid() error { } if pm, err := isPriorityValid(conf.WritePeerPriorities); err != nil { return err - } else if pm[statistics.QueryPriority] { + } else if pm[utils.QueryPriority] { return errs.ErrSchedulerConfig.FastGenByArgs("query is not allowed to be set in priorities for write-peer-priorities") } @@ -383,7 +383,7 @@ func (conf *hotRegionSchedulerConfig) valid() error { return errs.ErrSchedulerConfig.FastGenByArgs("invalid rank-formula-version") } - if conf.ForbidRWType != statistics.Read.String() && conf.ForbidRWType != statistics.Write.String() && + if conf.ForbidRWType != utils.Read.String() && conf.ForbidRWType != utils.Write.String() && conf.ForbidRWType != "none" && conf.ForbidRWType != "" { return errs.ErrSchedulerConfig.FastGenByArgs("invalid forbid-rw-type") } @@ -490,7 +490,7 @@ func getWritePeerPriorities(c *prioritiesConfig) []string { // because tikv below 5.2.0 does not report query information, we will use byte and key as the scheduling dimensions func adjustPrioritiesConfig(querySupport bool, origins []string, getPriorities func(*prioritiesConfig) []string) []string { withQuery := slice.AnyOf(origins, func(i int) bool { - return origins[i] == statistics.QueryPriority + return origins[i] == utils.QueryPriority }) compatibles := getPriorities(&compatiblePrioritiesConfig) if !querySupport && withQuery { @@ -499,7 +499,7 @@ func adjustPrioritiesConfig(querySupport bool, origins []string, getPriorities f defaults := getPriorities(&defaultPrioritiesConfig) isLegal := 
slice.AllOf(origins, func(i int) bool { - return origins[i] == statistics.BytePriority || origins[i] == statistics.KeyPriority || origins[i] == statistics.QueryPriority + return origins[i] == utils.BytePriority || origins[i] == utils.KeyPriority || origins[i] == utils.QueryPriority }) if len(defaults) == len(origins) && isLegal && origins[0] != origins[1] { return origins diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 680fc8f2d30..ee569f4b70e 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -31,6 +31,7 @@ import ( "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/operatorutil" @@ -40,11 +41,11 @@ import ( func init() { schedulePeerPr = 1.0 - RegisterScheduler(statistics.Write.String(), func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(utils.Write.String(), func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { cfg := initHotRegionScheduleConfig() return newHotWriteScheduler(opController, cfg), nil }) - RegisterScheduler(statistics.Read.String(), func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { + RegisterScheduler(utils.Read.String(), func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) { return newHotReadScheduler(opController, initHotRegionScheduleConfig()), nil }) } @@ -52,14 +53,14 @@ func init() { func newHotReadScheduler(opController *operator.Controller, conf *hotRegionSchedulerConfig) *hotScheduler { ret := newHotScheduler(opController, conf) ret.name = "" - ret.types = []statistics.RWType{statistics.Read} + ret.types = []utils.RWType{utils.Read} return ret } func newHotWriteScheduler(opController *operator.Controller, conf *hotRegionSchedulerConfig) *hotScheduler { ret := newHotScheduler(opController, conf) ret.name = "" - ret.types = []statistics.RWType{statistics.Write} + ret.types = []utils.RWType{utils.Write} return ret } @@ -75,35 +76,35 @@ func TestUpgrade(t *testing.T) { sche, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(HotRegionType, nil)) re.NoError(err) hb := sche.(*hotScheduler) - re.Equal([]string{statistics.QueryPriority, statistics.BytePriority}, hb.conf.GetReadPriorities()) - re.Equal([]string{statistics.QueryPriority, statistics.BytePriority}, hb.conf.GetWriteLeaderPriorities()) - re.Equal([]string{statistics.BytePriority, statistics.KeyPriority}, hb.conf.GetWritePeerPriorities()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetReadPriorities()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetWriteLeaderPriorities()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetWritePeerPriorities()) re.Equal("v2", hb.conf.GetRankFormulaVersion()) // upgrade from json(null) sche, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), 
ConfigJSONDecoder([]byte("null"))) re.NoError(err) hb = sche.(*hotScheduler) - re.Equal([]string{statistics.QueryPriority, statistics.BytePriority}, hb.conf.GetReadPriorities()) - re.Equal([]string{statistics.QueryPriority, statistics.BytePriority}, hb.conf.GetWriteLeaderPriorities()) - re.Equal([]string{statistics.BytePriority, statistics.KeyPriority}, hb.conf.GetWritePeerPriorities()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetReadPriorities()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetWriteLeaderPriorities()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetWritePeerPriorities()) re.Equal("v2", hb.conf.GetRankFormulaVersion()) // upgrade from < 5.2 config51 := `{"min-hot-byte-rate":100,"min-hot-key-rate":10,"min-hot-query-rate":10,"max-zombie-rounds":5,"max-peer-number":1000,"byte-rate-rank-step-ratio":0.05,"key-rate-rank-step-ratio":0.05,"query-rate-rank-step-ratio":0.05,"count-rank-step-ratio":0.01,"great-dec-ratio":0.95,"minor-dec-ratio":0.99,"src-tolerance-ratio":1.05,"dst-tolerance-ratio":1.05,"strict-picking-store":"true","enable-for-tiflash":"true"}` sche, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte(config51))) re.NoError(err) hb = sche.(*hotScheduler) - re.Equal([]string{statistics.BytePriority, statistics.KeyPriority}, hb.conf.GetReadPriorities()) - re.Equal([]string{statistics.KeyPriority, statistics.BytePriority}, hb.conf.GetWriteLeaderPriorities()) - re.Equal([]string{statistics.BytePriority, statistics.KeyPriority}, hb.conf.GetWritePeerPriorities()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetReadPriorities()) + re.Equal([]string{utils.KeyPriority, utils.BytePriority}, hb.conf.GetWriteLeaderPriorities()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetWritePeerPriorities()) re.Equal("v1", hb.conf.GetRankFormulaVersion()) // upgrade from < 6.4 config54 := `{"min-hot-byte-rate":100,"min-hot-key-rate":10,"min-hot-query-rate":10,"max-zombie-rounds":5,"max-peer-number":1000,"byte-rate-rank-step-ratio":0.05,"key-rate-rank-step-ratio":0.05,"query-rate-rank-step-ratio":0.05,"count-rank-step-ratio":0.01,"great-dec-ratio":0.95,"minor-dec-ratio":0.99,"src-tolerance-ratio":1.05,"dst-tolerance-ratio":1.05,"read-priorities":["query","byte"],"write-leader-priorities":["query","byte"],"write-peer-priorities":["byte","key"],"strict-picking-store":"true","enable-for-tiflash":"true","forbid-rw-type":"none"}` sche, err = CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte(config54))) re.NoError(err) hb = sche.(*hotScheduler) - re.Equal([]string{statistics.QueryPriority, statistics.BytePriority}, hb.conf.GetReadPriorities()) - re.Equal([]string{statistics.QueryPriority, statistics.BytePriority}, hb.conf.GetWriteLeaderPriorities()) - re.Equal([]string{statistics.BytePriority, statistics.KeyPriority}, hb.conf.GetWritePeerPriorities()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetReadPriorities()) + re.Equal([]string{utils.QueryPriority, utils.BytePriority}, hb.conf.GetWriteLeaderPriorities()) + re.Equal([]string{utils.BytePriority, utils.KeyPriority}, hb.conf.GetWritePeerPriorities()) re.Equal("v1", hb.conf.GetRankFormulaVersion()) } @@ -140,8 +141,8 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) { re.NoError(err) re.NotNil(op) op.Start() - op.SetStatusReachTime(operator.CREATED, 
time.Now().Add(-5*statistics.StoreHeartBeatReportInterval*time.Second)) - op.SetStatusReachTime(operator.STARTED, time.Now().Add((-5*statistics.StoreHeartBeatReportInterval+1)*time.Second)) + op.SetStatusReachTime(operator.CREATED, time.Now().Add(-5*utils.StoreHeartBeatReportInterval*time.Second)) + op.SetStatusReachTime(operator.STARTED, time.Now().Add((-5*utils.StoreHeartBeatReportInterval+1)*time.Second)) return newPendingInfluence(op, []uint64{2}, 4, statistics.Influence{}, hb.conf.GetStoreStatZombieDuration()) } justDoneOpInfluence := func(region *core.RegionInfo, ty opType) *pendingInfluence { @@ -151,7 +152,7 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) { } shouldRemoveOpInfluence := func(region *core.RegionInfo, ty opType) *pendingInfluence { infl := justDoneOpInfluence(region, ty) - infl.op.SetStatusReachTime(operator.CANCELED, time.Now().Add(-3*statistics.StoreHeartBeatReportInterval*time.Second)) + infl.op.SetStatusReachTime(operator.CANCELED, time.Now().Add(-3*utils.StoreHeartBeatReportInterval*time.Second)) return infl } opInfluenceCreators := [3]func(region *core.RegionInfo, ty opType) *pendingInfluence{shouldRemoveOpInfluence, notDoneOpInfluence, justDoneOpInfluence} @@ -209,7 +210,7 @@ func TestSplitIfRegionTooHot(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetHotRegionCacheHitsThreshold(1) - hb, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) b := &metapb.Buckets{ RegionId: 1, @@ -239,14 +240,14 @@ func TestSplitIfRegionTooHot(t *testing.T) { tc.AddRegionStore(2, 2) tc.AddRegionStore(3, 2) - tc.UpdateStorageReadBytes(1, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(2, 1*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(3, 1*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(1, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(2, 1*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(3, 1*units.MiB*utils.StoreHeartBeatReportInterval) // Region 1, 2 and 3 are hot regions. - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 4 * units.MiB, 0, 0}, }) - tc.GetStoreConfig().SetRegionBucketEnabled(true) + tc.SetRegionBucketEnabled(true) ops, _ := hb.Schedule(tc, false) re.Len(ops, 1) expectOp, _ := operator.CreateSplitRegionOperator(splitHotReadBuckets, tc.GetRegion(1), operator.OpSplit, @@ -257,14 +258,14 @@ func TestSplitIfRegionTooHot(t *testing.T) { ops, _ = hb.Schedule(tc, false) re.Len(ops, 0) - tc.UpdateStorageWrittenBytes(1, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(2, 1*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(3, 1*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(1, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(2, 1*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(3, 1*units.MiB*utils.StoreHeartBeatReportInterval) // Region 1, 2 and 3 are hot regions. 
- addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 4 * units.MiB, 0, 0}, }) - hb, _ = CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, _ = CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) expectOp, _ = operator.CreateSplitRegionOperator(splitHotReadBuckets, tc.GetRegion(1), operator.OpSplit, @@ -282,11 +283,11 @@ func TestSplitBucketsBySize(t *testing.T) { statistics.Denoising = false cancel, _, tc, oc := prepareSchedulersTest() tc.SetHotRegionCacheHitsThreshold(1) - tc.GetStoreConfig().SetRegionBucketEnabled(true) + tc.SetRegionBucketEnabled(true) defer cancel() - hb, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - solve := newBalanceSolver(hb.(*hotScheduler), tc, statistics.Read, transferLeader) + solve := newBalanceSolver(hb.(*hotScheduler), tc, utils.Read, transferLeader) solve.cur = &solution{} region := core.NewTestRegionInfo(1, 1, []byte("a"), []byte("f")) @@ -335,11 +336,11 @@ func TestSplitBucketsByLoad(t *testing.T) { statistics.Denoising = false cancel, _, tc, oc := prepareSchedulersTest() tc.SetHotRegionCacheHitsThreshold(1) - tc.GetStoreConfig().SetRegionBucketEnabled(true) + tc.SetRegionBucketEnabled(true) defer cancel() - hb, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - solve := newBalanceSolver(hb.(*hotScheduler), tc, statistics.Read, transferLeader) + solve := newBalanceSolver(hb.(*hotScheduler), tc, utils.Read, transferLeader) solve.cur = &solution{} region := core.NewTestRegionInfo(1, 1, []byte("a"), []byte("f")) testdata := []struct { @@ -398,7 +399,7 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace tc.SetEnablePlacementRules(enablePlacementRules) labels := []string{"zone", "host"} tc.SetMaxReplicasWithLabel(enablePlacementRules, 3, labels...) 
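The tests above build a solver with newBalanceSolver(hb.(*hotScheduler), tc, utils.Read, transferLeader). How that pair picks a load table follows toResourceType from earlier in this diff; the sketch below mirrors that mapping with hypothetical local stand-ins (opKind, resourceKind) for the package's unexported opType and resourceType, so it is illustrative only.

package main

import (
	"fmt"

	"github.com/tikv/pd/pkg/statistics/utils"
)

// opKind and resourceKind are local stand-ins for the schedulers package's
// unexported opType and resourceType; they exist only to illustrate the
// mapping performed by toResourceType.
type opKind int

const (
	movePeer opKind = iota
	transferLeader
)

type resourceKind string

// resourceFor mirrors toResourceType: the read/write type plus the operation
// kind select which per-resource load table (sche.stLoadInfos[resourceTy])
// the balance solver reads in init().
func resourceFor(rw utils.RWType, op opKind) resourceKind {
	switch rw {
	case utils.Write:
		if op == transferLeader {
			return "write-leader"
		}
		return "write-peer"
	case utils.Read:
		if op == transferLeader {
			return "read-leader"
		}
		return "read-peer"
	}
	return ""
}

func main() {
	fmt.Println(resourceFor(utils.Read, transferLeader)) // read-leader
	fmt.Println(resourceFor(utils.Write, movePeer))      // write-peer
}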
- hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) tc.SetHotRegionCacheHitsThreshold(0) @@ -420,10 +421,10 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace // | 4 | 6MB | // | 5 | 0MB | // | 6 | 0MB | - tc.UpdateStorageWrittenBytes(1, 7.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(2, 4.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(3, 4.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(4, 6*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(1, 7.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(2, 4.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(3, 4.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(4, 6*units.MiB*utils.StoreHeartBeatReportInterval) tc.UpdateStorageWrittenBytes(5, 0) tc.UpdateStorageWrittenBytes(6, 0) @@ -433,7 +434,7 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace // | 2 | 1 | 3 | 4 | 512KB | // | 3 | 1 | 2 | 4 | 512KB | // Region 1, 2 and 3 are hot regions. - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {2, []uint64{1, 3, 4}, 512 * units.KiB, 0, 0}, {3, []uint64{1, 2, 4}, 512 * units.KiB, 0, 0}, @@ -507,12 +508,12 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace // | 4 | 3.1MB | // | 5 | 0MB | // | 6 | 3MB | - tc.UpdateStorageWrittenBytes(1, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(2, 5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(4, 3.1*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(1, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(2, 5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(3, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(4, 3.1*units.MiB*utils.StoreHeartBeatReportInterval) tc.UpdateStorageWrittenBytes(5, 0) - tc.UpdateStorageWrittenBytes(6, 3*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(6, 3*units.MiB*utils.StoreHeartBeatReportInterval) // | region_id | leader_store | follower_store | follower_store | written_bytes | // |-----------|--------------|----------------|----------------|---------------| @@ -521,7 +522,7 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace // | 3 | 6 | 1 | 4 | 512KB | // | 4 | 5 | 6 | 4 | 512KB | // | 5 | 3 | 4 | 5 | 512KB | - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {2, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {3, []uint64{6, 1, 4}, 512 * units.KiB, 0, 0}, @@ -601,7 +602,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { }, }, })) - sche, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) 
re.NoError(err) hb := sche.(*hotScheduler) @@ -643,7 +644,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { {3, []uint64{1, 2, 4, 9}, 512 * units.KiB, 5 * units.KiB, 3000}, {4, []uint64{2, 10}, 100, 1, 1}, } - addRegionInfo(tc, statistics.Write, testRegions) + addRegionInfo(tc, utils.Write, testRegions) regionBytesSum := 0.0 regionKeysSum := 0.0 regionQuerySum := 0.0 @@ -699,10 +700,10 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { // | 10 | n/a | // | 11 | n/a | storesBytes := map[uint64]uint64{ - 1: 7.5 * units.MiB * statistics.StoreHeartBeatReportInterval, - 2: 4.5 * units.MiB * statistics.StoreHeartBeatReportInterval, - 3: 4.5 * units.MiB * statistics.StoreHeartBeatReportInterval, - 4: 6 * units.MiB * statistics.StoreHeartBeatReportInterval, + 1: 7.5 * units.MiB * utils.StoreHeartBeatReportInterval, + 2: 4.5 * units.MiB * utils.StoreHeartBeatReportInterval, + 3: 4.5 * units.MiB * utils.StoreHeartBeatReportInterval, + 4: 6 * units.MiB * utils.StoreHeartBeatReportInterval, } tc.SetStoreEvictLeader(5, true) tikvBytesSum, tikvKeysSum, tikvQuerySum := 0.0, 0.0, 0.0 @@ -790,22 +791,22 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) { statistics.Denoising = false statisticsInterval = 0 - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.QueryPriority, statistics.BytePriority} + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.QueryPriority, utils.BytePriority} tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) - tc.UpdateStorageWriteQuery(1, 11000*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWriteQuery(2, 10000*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWriteQuery(3, 9000*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWriteQuery(1, 11000*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWriteQuery(2, 10000*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWriteQuery(3, 9000*utils.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 500, 0, 500}, {2, []uint64{1, 2, 3}, 500, 0, 500}, {3, []uint64{2, 1, 3}, 500, 0, 500}, @@ -826,11 +827,11 @@ func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.RankFormulaVersion = "v1" tc.SetHotRegionCacheHitsThreshold(0) @@ -841,13 +842,13 @@ func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) { tc.AddRegionStore(4, 20) tc.AddRegionStore(5, 20) - tc.UpdateStorageWrittenStats(1, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval, 
10.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval, 9.8*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 9*units.MiB*statistics.StoreHeartBeatReportInterval, 9*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(5, 8.9*units.MiB*statistics.StoreHeartBeatReportInterval, 9.2*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(1, 10.5*units.MiB*utils.StoreHeartBeatReportInterval, 10.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 9.5*units.MiB*utils.StoreHeartBeatReportInterval, 9.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 9.5*units.MiB*utils.StoreHeartBeatReportInterval, 9.8*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 9*units.MiB*utils.StoreHeartBeatReportInterval, 9*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(5, 8.9*units.MiB*utils.StoreHeartBeatReportInterval, 9.2*units.MiB*utils.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{2, 1, 3}, 0.5 * units.MiB, 0.5 * units.MiB, 0}, {2, []uint64{2, 1, 3}, 0.5 * units.MiB, 0.5 * units.MiB, 0}, {3, []uint64{2, 4, 3}, 0.05 * units.MiB, 0.1 * units.MiB, 0}, @@ -885,7 +886,7 @@ func TestHotWriteRegionScheduleUnhealthyStore(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) @@ -897,11 +898,11 @@ func TestHotWriteRegionScheduleUnhealthyStore(t *testing.T) { tc.AddRegionStore(3, 20) tc.AddRegionStore(4, 20) - tc.UpdateStorageWrittenStats(1, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 10*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 0*units.MiB*statistics.StoreHeartBeatReportInterval, 0*units.MiB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + tc.UpdateStorageWrittenStats(1, 10.5*units.MiB*utils.StoreHeartBeatReportInterval, 10.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 10*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 9.5*units.MiB*utils.StoreHeartBeatReportInterval, 9.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 0*units.MiB*utils.StoreHeartBeatReportInterval, 0*units.MiB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0.5 * units.MiB, 0.5 * units.MiB, 0}, {2, []uint64{2, 1, 3}, 0.5 * units.MiB, 0.5 * units.MiB, 0}, {3, []uint64{3, 2, 1}, 0.5 * units.MiB, 0.5 
* units.MiB, 0}, @@ -933,7 +934,7 @@ func TestHotWriteRegionScheduleCheckHot(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) @@ -946,12 +947,12 @@ func TestHotWriteRegionScheduleCheckHot(t *testing.T) { tc.AddRegionStore(4, 20) tc.AddRegionStore(5, 20) - tc.UpdateStorageWrittenStats(1, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(1, 10.5*units.MiB*utils.StoreHeartBeatReportInterval, 10.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 10.5*units.MiB*utils.StoreHeartBeatReportInterval, 10.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 10.5*units.MiB*utils.StoreHeartBeatReportInterval, 10.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 9.5*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 90, 0.5 * units.MiB, 0}, // no hot {1, []uint64{2, 1, 3}, 90, 0.5 * units.MiB, 0}, // no hot {2, []uint64{3, 2, 1}, 0.5 * units.MiB, 0.5 * units.MiB, 0}, // byteDecRatio is greater than greatDecRatio @@ -968,8 +969,8 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} re.NoError(err) tc.SetHotRegionCacheHitsThreshold(0) @@ -977,19 +978,19 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) { tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) - tc.UpdateStorageWrittenBytes(1, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(2, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(3, 10*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(1, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(2, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(3, 10*units.MiB*utils.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenKeys(1, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenKeys(2, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenKeys(3, 10*units.MiB*statistics.StoreHeartBeatReportInterval) + 
tc.UpdateStorageWrittenKeys(1, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenKeys(2, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenKeys(3, 10*units.MiB*utils.StoreHeartBeatReportInterval) // store1 has 2 peer as leader // store2 has 3 peer as leader // store3 has 2 peer as leader // If transfer leader from store2 to store1 or store3, it will keep on looping, which introduces a lot of unnecessary scheduling - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0.5 * units.MiB, 1 * units.MiB, 0}, {2, []uint64{1, 2, 3}, 0.5 * units.MiB, 1 * units.MiB, 0}, {3, []uint64{2, 1, 3}, 0.5 * units.MiB, 1 * units.MiB, 0}, @@ -1005,7 +1006,7 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) { re.Empty(ops) } - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {8, []uint64{2, 1, 3}, 0.5 * units.MiB, 1 * units.MiB, 0}, }) @@ -1034,9 +1035,9 @@ func TestHotWriteRegionScheduleWithPendingInfluence(t *testing.T) { func checkHotWriteRegionScheduleWithPendingInfluence(re *require.Assertions, dim int) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.RankFormulaVersion = "v1" old := pendingAmpFactor pendingAmpFactor = 0.0 @@ -1055,13 +1056,13 @@ func checkHotWriteRegionScheduleWithPendingInfluence(re *require.Assertions, dim if dim == 1 { // key rate updateStore = tc.UpdateStorageWrittenKeys } - updateStore(1, 8*units.MiB*statistics.StoreHeartBeatReportInterval) - updateStore(2, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - updateStore(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - updateStore(4, 4*units.MiB*statistics.StoreHeartBeatReportInterval) + updateStore(1, 8*units.MiB*utils.StoreHeartBeatReportInterval) + updateStore(2, 6*units.MiB*utils.StoreHeartBeatReportInterval) + updateStore(3, 6*units.MiB*utils.StoreHeartBeatReportInterval) + updateStore(4, 4*units.MiB*utils.StoreHeartBeatReportInterval) if dim == 0 { // byte rate - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {2, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {3, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, @@ -1070,7 +1071,7 @@ func checkHotWriteRegionScheduleWithPendingInfluence(re *require.Assertions, dim {6, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, }) } else if dim == 1 { // key rate - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0, 512 * units.KiB, 0}, {2, []uint64{1, 2, 3}, 0, 512 * units.KiB, 0}, {3, []uint64{1, 2, 3}, 0, 512 * units.KiB, 0}, @@ -1122,9 +1123,9 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetEnablePlacementRules(true) - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, 
storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} tc.SetHotRegionCacheHitsThreshold(0) key, err := hex.DecodeString("") @@ -1170,15 +1171,15 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { }) re.NoError(err) - tc.UpdateStorageWrittenBytes(1, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(2, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(3, 10*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(1, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(2, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(3, 10*units.MiB*utils.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenKeys(1, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenKeys(2, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenKeys(3, 10*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenKeys(1, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenKeys(2, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenKeys(3, 10*units.MiB*utils.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0.5 * units.MiB, 1 * units.MiB, 0}, {2, []uint64{1, 2, 3}, 0.5 * units.MiB, 1 * units.MiB, 0}, {3, []uint64{2, 1, 3}, 0.5 * units.MiB, 1 * units.MiB, 0}, @@ -1204,10 +1205,10 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - scheduler, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + scheduler, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb := scheduler.(*hotScheduler) - hb.conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} tc.SetHotRegionCacheHitsThreshold(0) // Add stores 1, 2, 3, 4, 5 with region counts 3, 2, 2, 2, 0. 
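Every store-load update in these tests multiplies the intended per-second rate by utils.StoreHeartBeatReportInterval. A minimal sketch of that convention, assuming (as the expected MB figures in the test comments suggest) that the mock helpers take flow accumulated over one report interval and the hot cache turns it back into a per-second rate:

package main

import (
	"fmt"

	"github.com/docker/go-units"
	"github.com/tikv/pd/pkg/statistics/utils"
)

func main() {
	// Flow accumulated over one store heartbeat report interval, which is why
	// the tests multiply the target rate by utils.StoreHeartBeatReportInterval.
	reported := 7.5 * units.MiB * utils.StoreHeartBeatReportInterval
	ratePerSec := reported / utils.StoreHeartBeatReportInterval
	fmt.Printf("%.1f MiB/s\n", ratePerSec/units.MiB) // 7.5 MiB/s
}

Under that assumption, UpdateStorageReadBytes(1, 7.5*units.MiB*utils.StoreHeartBeatReportInterval) corresponds to the 7.5MB read-bytes row for store 1 in the table that follows.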
@@ -1224,10 +1225,10 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { // | 3 | 3.7MB | // | 4 | 6MB | // | 5 | 0MB | - tc.UpdateStorageReadBytes(1, 7.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(2, 4.9*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(3, 3.7*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(4, 6*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(1, 7.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(2, 4.9*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(3, 3.7*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(4, 6*units.MiB*utils.StoreHeartBeatReportInterval) tc.UpdateStorageReadBytes(5, 0) // | region_id | leader_store | follower_store | follower_store | read_bytes_rate | @@ -1237,7 +1238,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { // | 3 | 1 | 2 | 3 | 510KB | // | 11 | 1 | 2 | 3 | 7KB | // Region 1, 2 and 3 are hot regions. - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {2, []uint64{2, 1, 3}, 511 * units.KiB, 0, 0}, {3, []uint64{1, 2, 3}, 510 * units.KiB, 0, 0}, @@ -1247,14 +1248,14 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { re.True(tc.IsRegionHot(tc.GetRegion(1))) re.False(tc.IsRegionHot(tc.GetRegion(11))) // check randomly pick hot region - r := tc.HotRegionsFromStore(2, statistics.Read) + r := tc.HotRegionsFromStore(2, utils.Read) re.Len(r, 3) // check hot items - stats := tc.HotCache.RegionStats(statistics.Read, 0) + stats := tc.HotCache.RegionStats(utils.Read, 0) re.Len(stats, 3) for _, ss := range stats { for _, s := range ss { - re.Less(500.0*units.KiB, s.GetLoad(statistics.ByteDim)) + re.Less(500.0*units.KiB, s.GetLoad(utils.ByteDim)) } } @@ -1265,11 +1266,11 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { // it is better than transfer leader from store 1 to store 3 operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion, 1, 5) re.Contains(hb.regionPendings, uint64(1)) - re.True(typeutil.Float64Equal(512.0*units.KiB, hb.regionPendings[1].origin.Loads[statistics.RegionReadBytes])) + re.True(typeutil.Float64Equal(512.0*units.KiB, hb.regionPendings[1].origin.Loads[utils.RegionReadBytes])) clearPendingInfluence(hb) // assume handle the transfer leader operator rather than move leader - tc.AddRegionWithReadInfo(3, 3, 512*units.KiB*statistics.ReadReportInterval, 0, 0, statistics.ReadReportInterval, []uint64{1, 2}) + tc.AddRegionWithReadInfo(3, 3, 512*units.KiB*utils.StoreHeartBeatReportInterval, 0, 0, utils.StoreHeartBeatReportInterval, []uint64{1, 2}) // After transfer a hot region leader from store 1 to store 3 // the three region leader will be evenly distributed in three stores @@ -1280,11 +1281,11 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { // | 3 | 5.5MB | // | 4 | 3.4MB | // | 5 | 3MB | - tc.UpdateStorageReadBytes(1, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(2, 5.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(3, 5.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(4, 3.4*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(5, 3*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(1, 
6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(2, 5.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(3, 5.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(4, 3.4*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(5, 3*units.MiB*utils.StoreHeartBeatReportInterval) // | region_id | leader_store | follower_store | follower_store | read_bytes_rate | // |-----------|--------------|----------------|----------------|--------------------| @@ -1294,7 +1295,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { // | 4 | 1 | 2 | 3 | 509KB | // | 5 | 4 | 2 | 5 | 508KB | // | 11 | 1 | 2 | 3 | 7KB | - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {4, []uint64{1, 2, 3}, 509 * units.KiB, 0, 0}, {5, []uint64{4, 2, 5}, 508 * units.KiB, 0, 0}, }) @@ -1304,7 +1305,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { op = ops[0] operatorutil.CheckTransferPeerWithLeaderTransfer(re, op, operator.OpHotRegion|operator.OpLeader, 1, 5) re.Contains(hb.regionPendings, uint64(1)) - re.True(typeutil.Float64Equal(512.0*units.KiB, hb.regionPendings[1].origin.Loads[statistics.RegionReadBytes])) + re.True(typeutil.Float64Equal(512.0*units.KiB, hb.regionPendings[1].origin.Loads[utils.RegionReadBytes])) clearPendingInfluence(hb) // Should not panic if region not found. @@ -1316,7 +1317,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { hb.updateReadTime = time.Now().Add(-time.Second) hb.Schedule(tc, false) re.Contains(hb.regionPendings, uint64(4)) - re.True(typeutil.Float64Equal(509.0*units.KiB, hb.regionPendings[4].origin.Loads[statistics.RegionReadBytes])) + re.True(typeutil.Float64Equal(509.0*units.KiB, hb.regionPendings[4].origin.Loads[utils.RegionReadBytes])) clearPendingInfluence(hb) } @@ -1327,7 +1328,7 @@ func TestHotReadRegionScheduleWithQuery(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) @@ -1338,11 +1339,11 @@ func TestHotReadRegionScheduleWithQuery(t *testing.T) { tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) - tc.UpdateStorageReadQuery(1, 10500*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadQuery(2, 10000*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadQuery(3, 9000*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageReadQuery(1, 10500*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadQuery(2, 10000*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadQuery(3, 9000*utils.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0, 0, 500}, {2, []uint64{2, 1, 3}, 0, 0, 500}, }) @@ -1362,12 +1363,12 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.RankFormulaVersion = "v1" hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) 
hb.(*hotScheduler).conf.SetDstToleranceRatio(1) - hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) @@ -1376,13 +1377,13 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) { tc.AddRegionStore(4, 20) tc.AddRegionStore(5, 20) - tc.UpdateStorageReadStats(1, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval, 10.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(2, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(3, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval, 9.8*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(4, 9*units.MiB*statistics.StoreHeartBeatReportInterval, 9*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(5, 8.9*units.MiB*statistics.StoreHeartBeatReportInterval, 9.2*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(1, 10.5*units.MiB*utils.StoreHeartBeatReportInterval, 10.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(2, 9.5*units.MiB*utils.StoreHeartBeatReportInterval, 9.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(3, 9.5*units.MiB*utils.StoreHeartBeatReportInterval, 9.8*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(4, 9*units.MiB*utils.StoreHeartBeatReportInterval, 9*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(5, 8.9*units.MiB*utils.StoreHeartBeatReportInterval, 9.2*units.MiB*utils.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 4}, 0.5 * units.MiB, 0.5 * units.MiB, 0}, {2, []uint64{1, 2, 4}, 0.5 * units.MiB, 0.5 * units.MiB, 0}, {3, []uint64{3, 4, 5}, 0.05 * units.MiB, 0.1 * units.MiB, 0}, @@ -1424,14 +1425,14 @@ func TestHotReadRegionScheduleWithPendingInfluence(t *testing.T) { func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim int) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) // For test hb.(*hotScheduler).conf.RankFormulaVersion = "v1" hb.(*hotScheduler).conf.GreatDecRatio = 0.99 hb.(*hotScheduler).conf.MinorDecRatio = 1 hb.(*hotScheduler).conf.DstToleranceRatio = 1 - hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} old := pendingAmpFactor pendingAmpFactor = 0.0 defer func() { @@ -1449,13 +1450,13 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim if dim == 1 { // key rate updateStore = tc.UpdateStorageReadKeys } - updateStore(1, 7.1*units.MiB*statistics.StoreHeartBeatReportInterval) - updateStore(2, 6.1*units.MiB*statistics.StoreHeartBeatReportInterval) - updateStore(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - updateStore(4, 5*units.MiB*statistics.StoreHeartBeatReportInterval) + updateStore(1, 7.1*units.MiB*utils.StoreHeartBeatReportInterval) + updateStore(2, 
6.1*units.MiB*utils.StoreHeartBeatReportInterval) + updateStore(3, 6*units.MiB*utils.StoreHeartBeatReportInterval) + updateStore(4, 5*units.MiB*utils.StoreHeartBeatReportInterval) if dim == 0 { // byte rate - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {2, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {3, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, @@ -1466,7 +1467,7 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim {8, []uint64{3, 2, 1}, 512 * units.KiB, 0, 0}, }) } else if dim == 1 { // key rate - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0, 512 * units.KiB, 0}, {2, []uint64{1, 2, 3}, 0, 512 * units.KiB, 0}, {3, []uint64{1, 2, 3}, 0, 512 * units.KiB, 0}, @@ -1542,12 +1543,12 @@ func TestHotReadWithEvictLeaderScheduler(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetStrictPickingStore(false) - hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) @@ -1558,11 +1559,11 @@ func TestHotReadWithEvictLeaderScheduler(t *testing.T) { tc.AddRegionStore(6, 20) // no uniform among four stores - tc.UpdateStorageReadStats(1, 10.05*units.MB*statistics.StoreHeartBeatReportInterval, 10.05*units.MB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(2, 10.05*units.MB*statistics.StoreHeartBeatReportInterval, 10.05*units.MB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(3, 10.05*units.MB*statistics.StoreHeartBeatReportInterval, 10.05*units.MB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(4, 0.0*units.MB*statistics.StoreHeartBeatReportInterval, 0.0*units.MB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + tc.UpdateStorageReadStats(1, 10.05*units.MB*utils.StoreHeartBeatReportInterval, 10.05*units.MB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(2, 10.05*units.MB*utils.StoreHeartBeatReportInterval, 10.05*units.MB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(3, 10.05*units.MB*utils.StoreHeartBeatReportInterval, 10.05*units.MB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(4, 0.0*units.MB*utils.StoreHeartBeatReportInterval, 0.0*units.MB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 5, 6}, 0.05 * units.MB, 0.05 * units.MB, 0}, }) ops, _ := hb.Schedule(tc, false) @@ -1583,41 +1584,41 @@ func TestHotCacheUpdateCache(t *testing.T) { tc.SetHotRegionCacheHitsThreshold(0) // For read flow - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {2, []uint64{2, 1, 3}, 512 * units.KiB, 0, 0}, {3, []uint64{1, 2, 3}, 20 * units.KiB, 0, 0}, // lower than hot read flow 
rate, but higher than write flow rate {11, []uint64{1, 2, 3}, 7 * units.KiB, 0, 0}, }) - stats := tc.RegionStats(statistics.Read, 0) + stats := tc.RegionStats(utils.Read, 0) re.Len(stats[1], 3) re.Len(stats[2], 3) re.Len(stats[3], 3) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {3, []uint64{2, 1, 3}, 20 * units.KiB, 0, 0}, {11, []uint64{1, 2, 3}, 7 * units.KiB, 0, 0}, }) - stats = tc.RegionStats(statistics.Read, 0) + stats = tc.RegionStats(utils.Read, 0) re.Len(stats[1], 3) re.Len(stats[2], 3) re.Len(stats[3], 3) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {4, []uint64{1, 2, 3}, 512 * units.KiB, 0, 0}, {5, []uint64{1, 2, 3}, 20 * units.KiB, 0, 0}, {6, []uint64{1, 2, 3}, 0.8 * units.KiB, 0, 0}, }) - stats = tc.RegionStats(statistics.Write, 0) + stats = tc.RegionStats(utils.Write, 0) re.Len(stats[1], 2) re.Len(stats[2], 2) re.Len(stats[3], 2) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {5, []uint64{1, 2, 5}, 20 * units.KiB, 0, 0}, }) - stats = tc.RegionStats(statistics.Write, 0) + stats = tc.RegionStats(utils.Write, 0) re.Len(stats[1], 2) re.Len(stats[2], 2) @@ -1632,7 +1633,7 @@ func TestHotCacheUpdateCache(t *testing.T) { // lower than hot read flow rate, but higher than write flow rate {31, []uint64{4, 5, 6}, 7 * units.KiB, 0, 0}, }) - stats = tc.RegionStats(statistics.Read, 0) + stats = tc.RegionStats(utils.Read, 0) re.Len(stats[4], 2) re.Len(stats[5], 1) re.Empty(stats[6]) @@ -1648,17 +1649,17 @@ func TestHotCacheKeyThresholds(t *testing.T) { cancel, _, tc, _ := prepareSchedulersTest() defer cancel() tc.SetHotRegionCacheHitsThreshold(0) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0, 1, 0}, {2, []uint64{1, 2, 3}, 0, 1 * units.KiB, 0}, }) - stats := tc.RegionStats(statistics.Read, 0) + stats := tc.RegionStats(utils.Read, 0) re.Len(stats[1], 1) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {3, []uint64{4, 5, 6}, 0, 1, 0}, {4, []uint64{4, 5, 6}, 0, 1 * units.KiB, 0}, }) - stats = tc.RegionStats(statistics.Write, 0) + stats = tc.RegionStats(utils.Write, 0) re.Len(stats[4], 1) re.Len(stats[5], 1) re.Len(stats[6], 1) @@ -1683,31 +1684,31 @@ func TestHotCacheKeyThresholds(t *testing.T) { } { // read - addRegionInfo(tc, statistics.Read, regions) - stats := tc.RegionStats(statistics.Read, 0) + addRegionInfo(tc, utils.Read, regions) + stats := tc.RegionStats(utils.Read, 0) re.Greater(len(stats[1]), 500) // for AntiCount - addRegionInfo(tc, statistics.Read, regions) - addRegionInfo(tc, statistics.Read, regions) - addRegionInfo(tc, statistics.Read, regions) - addRegionInfo(tc, statistics.Read, regions) - stats = tc.RegionStats(statistics.Read, 0) + addRegionInfo(tc, utils.Read, regions) + addRegionInfo(tc, utils.Read, regions) + addRegionInfo(tc, utils.Read, regions) + addRegionInfo(tc, utils.Read, regions) + stats = tc.RegionStats(utils.Read, 0) re.Len(stats[1], 500) } { // write - addRegionInfo(tc, statistics.Write, regions) - stats := tc.RegionStats(statistics.Write, 0) + addRegionInfo(tc, utils.Write, regions) + stats := tc.RegionStats(utils.Write, 0) re.Greater(len(stats[1]), 500) re.Greater(len(stats[2]), 500) re.Greater(len(stats[3]), 500) // for AntiCount - addRegionInfo(tc, statistics.Write, regions) - addRegionInfo(tc, statistics.Write, regions) - addRegionInfo(tc, 
statistics.Write, regions) - addRegionInfo(tc, statistics.Write, regions) - stats = tc.RegionStats(statistics.Write, 0) + addRegionInfo(tc, utils.Write, regions) + addRegionInfo(tc, utils.Write, regions) + addRegionInfo(tc, utils.Write, regions) + addRegionInfo(tc, utils.Write, regions) + stats = tc.RegionStats(utils.Write, 0) re.Len(stats[1], 500) re.Len(stats[2], 500) re.Len(stats[3], 500) @@ -1734,32 +1735,32 @@ func TestHotCacheByteAndKey(t *testing.T) { }) } { // read - addRegionInfo(tc, statistics.Read, regions) - stats := tc.RegionStats(statistics.Read, 0) + addRegionInfo(tc, utils.Read, regions) + stats := tc.RegionStats(utils.Read, 0) re.Len(stats[1], 500) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {10001, []uint64{1, 2, 3}, 10 * units.KiB, 10 * units.KiB, 0}, {10002, []uint64{1, 2, 3}, 500 * units.KiB, 10 * units.KiB, 0}, {10003, []uint64{1, 2, 3}, 10 * units.KiB, 500 * units.KiB, 0}, {10004, []uint64{1, 2, 3}, 500 * units.KiB, 500 * units.KiB, 0}, }) - stats = tc.RegionStats(statistics.Read, 0) + stats = tc.RegionStats(utils.Read, 0) re.Len(stats[1], 503) } { // write - addRegionInfo(tc, statistics.Write, regions) - stats := tc.RegionStats(statistics.Write, 0) + addRegionInfo(tc, utils.Write, regions) + stats := tc.RegionStats(utils.Write, 0) re.Len(stats[1], 500) re.Len(stats[2], 500) re.Len(stats[3], 500) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {10001, []uint64{1, 2, 3}, 10 * units.KiB, 10 * units.KiB, 0}, {10002, []uint64{1, 2, 3}, 500 * units.KiB, 10 * units.KiB, 0}, {10003, []uint64{1, 2, 3}, 10 * units.KiB, 500 * units.KiB, 0}, {10004, []uint64{1, 2, 3}, 500 * units.KiB, 500 * units.KiB, 0}, }) - stats = tc.RegionStats(statistics.Write, 0) + stats = tc.RegionStats(utils.Write, 0) re.Len(stats[1], 503) re.Len(stats[2], 503) re.Len(stats[3], 503) @@ -1775,14 +1776,14 @@ type testRegionInfo struct { queryRate float64 } -func addRegionInfo(tc *mockcluster.Cluster, rwTy statistics.RWType, regions []testRegionInfo) { +func addRegionInfo(tc *mockcluster.Cluster, rwTy utils.RWType, regions []testRegionInfo) { addFunc := tc.AddRegionWithReadInfo - if rwTy == statistics.Write { + if rwTy == utils.Write { addFunc = tc.AddLeaderRegionWithWriteInfo } - reportIntervalSecs := statistics.WriteReportInterval - if rwTy == statistics.Read { - reportIntervalSecs = statistics.ReadReportInterval + reportIntervalSecs := utils.RegionHeartBeatReportInterval + if rwTy == utils.Read { + reportIntervalSecs = utils.StoreHeartBeatReportInterval } for _, r := range regions { addFunc( @@ -1798,7 +1799,7 @@ func addRegionInfo(tc *mockcluster.Cluster, rwTy statistics.RWType, regions []te func addRegionLeaderReadInfo(tc *mockcluster.Cluster, regions []testRegionInfo) { addFunc := tc.AddRegionLeaderWithReadInfo - reportIntervalSecs := statistics.ReadReportInterval + reportIntervalSecs := utils.StoreHeartBeatReportInterval for _, r := range regions { addFunc( r.id, r.peers[0], @@ -1812,7 +1813,7 @@ func addRegionLeaderReadInfo(tc *mockcluster.Cluster, regions []testRegionInfo) } type testHotCacheCheckRegionFlowCase struct { - kind statistics.RWType + kind utils.RWType onlyLeader bool DegreeAfterTransferLeader int } @@ -1821,17 +1822,17 @@ func TestHotCacheCheckRegionFlow(t *testing.T) { re := require.New(t) testCases := []testHotCacheCheckRegionFlowCase{ { - kind: statistics.Write, + kind: utils.Write, onlyLeader: false, DegreeAfterTransferLeader: 3, }, { - kind: statistics.Read, + 
kind: utils.Read, onlyLeader: false, DegreeAfterTransferLeader: 4, }, { - kind: statistics.Read, + kind: utils.Read, onlyLeader: true, DegreeAfterTransferLeader: 1, }, @@ -1854,7 +1855,7 @@ func checkHotCacheCheckRegionFlow(re *require.Assertions, testCase testHotCacheC re.NoError(err) hb := sche.(*hotScheduler) heartbeat := tc.AddLeaderRegionWithWriteInfo - if testCase.kind == statistics.Read { + if testCase.kind == utils.Read { if testCase.onlyLeader { heartbeat = tc.AddRegionLeaderWithReadInfo } else { @@ -1862,10 +1863,10 @@ func checkHotCacheCheckRegionFlow(re *require.Assertions, testCase testHotCacheC } } tc.AddRegionStore(2, 20) - tc.UpdateStorageReadStats(2, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval, 9.5*units.MiB*statistics.StoreHeartBeatReportInterval) - reportInterval := uint64(statistics.WriteReportInterval) - if testCase.kind == statistics.Read { - reportInterval = uint64(statistics.ReadReportInterval) + tc.UpdateStorageReadStats(2, 9.5*units.MiB*utils.StoreHeartBeatReportInterval, 9.5*units.MiB*utils.StoreHeartBeatReportInterval) + reportInterval := uint64(utils.RegionHeartBeatReportInterval) + if testCase.kind == utils.Read { + reportInterval = uint64(utils.StoreHeartBeatReportInterval) } // hot degree increase heartbeat(1, 1, 512*units.KiB*reportInterval, 0, 0, reportInterval, []uint64{2, 3}, 1) @@ -1905,7 +1906,7 @@ func checkHotCacheCheckRegionFlow(re *require.Assertions, testCase testHotCacheC re.NotEmpty(items) for _, item := range items { if item.StoreID == 3 { - re.Equal(statistics.Remove, item.GetActionType()) + re.Equal(utils.Remove, item.GetActionType()) continue } re.Equal(testCase.DegreeAfterTransferLeader+2, item.HotDegree) @@ -1927,28 +1928,28 @@ func checkHotCacheCheckRegionFlowWithDifferentThreshold(re *require.Assertions, tc.SetMaxReplicasWithLabel(enablePlacementRules, 3, labels...) 
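addRegionInfo and checkHotCacheCheckRegionFlow above pick their report interval by read/write type: the old WriteReportInterval becomes utils.RegionHeartBeatReportInterval and the old ReadReportInterval becomes utils.StoreHeartBeatReportInterval, reflecting that write flow is reported with region heartbeats and read flow with store heartbeats. A minimal sketch of that selection, using only identifiers that already appear in these hunks; reportIntervalFor is a hypothetical helper, not part of the patch:

package main

import (
    "fmt"

    "github.com/tikv/pd/pkg/statistics/utils"
)

// reportIntervalFor mirrors the replacement made in addRegionInfo:
// region heartbeats carry write stats, store heartbeats carry read stats.
func reportIntervalFor(rwTy utils.RWType) uint64 {
    if rwTy == utils.Read {
        return utils.StoreHeartBeatReportInterval // was statistics.ReadReportInterval
    }
    return utils.RegionHeartBeatReportInterval // was statistics.WriteReportInterval
}

func main() {
    fmt.Println(reportIntervalFor(utils.Read), reportIntervalFor(utils.Write))
}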
statistics.ThresholdsUpdateInterval = 0 defer func() { - statistics.ThresholdsUpdateInterval = statistics.StoreHeartBeatReportInterval + statistics.ThresholdsUpdateInterval = utils.StoreHeartBeatReportInterval }() // some peers are hot, and some are cold #3198 rate := uint64(512 * units.KiB) for i := 0; i < statistics.TopNN; i++ { - for j := 0; j < statistics.DefaultAotSize; j++ { - tc.AddLeaderRegionWithWriteInfo(uint64(i+100), 1, rate*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{2, 3}, 1) + for j := 0; j < utils.DefaultAotSize; j++ { + tc.AddLeaderRegionWithWriteInfo(uint64(i+100), 1, rate*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3}, 1) } } - items := tc.AddLeaderRegionWithWriteInfo(201, 1, rate*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{2, 3}, 1) - re.Equal(float64(rate)*statistics.HotThresholdRatio, tc.HotCache.GetThresholds(statistics.Write, items[0].StoreID)[0]) + items := tc.AddLeaderRegionWithWriteInfo(201, 1, rate*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3}, 1) + re.Equal(float64(rate)*statistics.HotThresholdRatio, tc.HotCache.GetThresholds(utils.Write, items[0].StoreID)[0]) // Threshold of store 1,2,3 is 409.6 units.KiB and others are 1 units.KiB // Make the hot threshold of some store is high and the others are low rate = 10 * units.KiB - tc.AddLeaderRegionWithWriteInfo(201, 1, rate*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{2, 3, 4}, 1) - items = tc.AddLeaderRegionWithWriteInfo(201, 1, rate*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{3, 4}, 1) + tc.AddLeaderRegionWithWriteInfo(201, 1, rate*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3, 4}, 1) + items = tc.AddLeaderRegionWithWriteInfo(201, 1, rate*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{3, 4}, 1) for _, item := range items { if item.StoreID < 4 { - re.Equal(statistics.Remove, item.GetActionType()) + re.Equal(utils.Remove, item.GetActionType()) } else { - re.Equal(statistics.Update, item.GetActionType()) + re.Equal(utils.Update, item.GetActionType()) } } } @@ -1960,24 +1961,24 @@ func TestHotCacheSortHotPeer(t *testing.T) { sche, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) re.NoError(err) hb := sche.(*hotScheduler) - leaderSolver := newBalanceSolver(hb, tc, statistics.Read, transferLeader) + leaderSolver := newBalanceSolver(hb, tc, utils.Read, transferLeader) hotPeers := []*statistics.HotPeerStat{{ RegionID: 1, Loads: []float64{ - statistics.QueryDim: 10, - statistics.ByteDim: 1, + utils.QueryDim: 10, + utils.ByteDim: 1, }, }, { RegionID: 2, Loads: []float64{ - statistics.QueryDim: 1, - statistics.ByteDim: 10, + utils.QueryDim: 1, + utils.ByteDim: 10, }, }, { RegionID: 3, Loads: []float64{ - statistics.QueryDim: 5, - statistics.ByteDim: 6, + utils.QueryDim: 5, + utils.ByteDim: 6, }, }} @@ -2019,7 +2020,7 @@ func TestInfluenceByRWType(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) @@ 
-2029,14 +2030,14 @@ func TestInfluenceByRWType(t *testing.T) { tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) tc.AddRegionStore(4, 20) - tc.UpdateStorageWrittenStats(1, 99*units.MiB*statistics.StoreHeartBeatReportInterval, 99*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 50*units.MiB*statistics.StoreHeartBeatReportInterval, 98*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 2*units.MiB*statistics.StoreHeartBeatReportInterval, 2*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + tc.UpdateStorageWrittenStats(1, 99*units.MiB*utils.StoreHeartBeatReportInterval, 99*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 50*units.MiB*utils.StoreHeartBeatReportInterval, 98*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 2*units.MiB*utils.StoreHeartBeatReportInterval, 2*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 1*units.MiB*utils.StoreHeartBeatReportInterval, 1*units.MiB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{2, 1, 3}, 0.5 * units.MiB, 0.5 * units.MiB, 0}, }) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{2, 1, 3}, 0.5 * units.MiB, 0.5 * units.MiB, 0}, }) // must move peer @@ -2048,18 +2049,18 @@ func TestInfluenceByRWType(t *testing.T) { hb.(*hotScheduler).summaryPendingInfluence() stInfos := hb.(*hotScheduler).stInfos - re.True(nearlyAbout(stInfos[1].PendingSum.Loads[statistics.RegionWriteKeys], -0.5*units.MiB)) - re.True(nearlyAbout(stInfos[1].PendingSum.Loads[statistics.RegionWriteBytes], -0.5*units.MiB)) - re.True(nearlyAbout(stInfos[4].PendingSum.Loads[statistics.RegionWriteKeys], 0.5*units.MiB)) - re.True(nearlyAbout(stInfos[4].PendingSum.Loads[statistics.RegionWriteBytes], 0.5*units.MiB)) - re.True(nearlyAbout(stInfos[1].PendingSum.Loads[statistics.RegionReadKeys], -0.5*units.MiB)) - re.True(nearlyAbout(stInfos[1].PendingSum.Loads[statistics.RegionReadBytes], -0.5*units.MiB)) - re.True(nearlyAbout(stInfos[4].PendingSum.Loads[statistics.RegionReadKeys], 0.5*units.MiB)) - re.True(nearlyAbout(stInfos[4].PendingSum.Loads[statistics.RegionReadBytes], 0.5*units.MiB)) + re.True(nearlyAbout(stInfos[1].PendingSum.Loads[utils.RegionWriteKeys], -0.5*units.MiB)) + re.True(nearlyAbout(stInfos[1].PendingSum.Loads[utils.RegionWriteBytes], -0.5*units.MiB)) + re.True(nearlyAbout(stInfos[4].PendingSum.Loads[utils.RegionWriteKeys], 0.5*units.MiB)) + re.True(nearlyAbout(stInfos[4].PendingSum.Loads[utils.RegionWriteBytes], 0.5*units.MiB)) + re.True(nearlyAbout(stInfos[1].PendingSum.Loads[utils.RegionReadKeys], -0.5*units.MiB)) + re.True(nearlyAbout(stInfos[1].PendingSum.Loads[utils.RegionReadBytes], -0.5*units.MiB)) + re.True(nearlyAbout(stInfos[4].PendingSum.Loads[utils.RegionReadKeys], 0.5*units.MiB)) + re.True(nearlyAbout(stInfos[4].PendingSum.Loads[utils.RegionReadBytes], 0.5*units.MiB)) // consider pending amp, there are nine regions or more. 
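The nearlyAbout assertions above encode the sign convention these pending-influence checks rely on: the source store of the scheduled move is debited by the region's load and the destination store is credited by the same amount, which is why store 1 sits at about -0.5 MiB and store 4 at about +0.5 MiB in those checks. A small self-contained sketch of that bookkeeping; applyPendingInfluence and the plain map are illustrative stand-ins, not PD's types:

package main

import (
    "fmt"

    "github.com/docker/go-units"
)

// applyPendingInfluence debits the source store and credits the destination
// store by the moved region's load (illustrative stand-in only).
func applyPendingInfluence(pending map[uint64]float64, src, dst uint64, load float64) {
    pending[src] -= load
    pending[dst] += load
}

func main() {
    pending := map[uint64]float64{1: 0, 4: 0}
    applyPendingInfluence(pending, 1, 4, 0.5*units.MiB)
    // store1=-0.5 MiB, store4=+0.5 MiB, matching the assertions above.
    fmt.Printf("store1=%.1f MiB store4=%.1f MiB\n", pending[1]/units.MiB, pending[4]/units.MiB)
}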
for i := 2; i < 13; i++ { - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {uint64(i), []uint64{1, 2, 3}, 0.7 * units.MiB, 0.7 * units.MiB, 0}, }) } @@ -2074,14 +2075,14 @@ func TestInfluenceByRWType(t *testing.T) { hb.(*hotScheduler).summaryPendingInfluence() stInfos = hb.(*hotScheduler).stInfos // assert read/write influence is the sum of write peer and write leader - re.True(nearlyAbout(stInfos[1].PendingSum.Loads[statistics.RegionWriteKeys], -1.2*units.MiB)) - re.True(nearlyAbout(stInfos[1].PendingSum.Loads[statistics.RegionWriteBytes], -1.2*units.MiB)) - re.True(nearlyAbout(stInfos[3].PendingSum.Loads[statistics.RegionWriteKeys], 0.7*units.MiB)) - re.True(nearlyAbout(stInfos[3].PendingSum.Loads[statistics.RegionWriteBytes], 0.7*units.MiB)) - re.True(nearlyAbout(stInfos[1].PendingSum.Loads[statistics.RegionReadKeys], -1.2*units.MiB)) - re.True(nearlyAbout(stInfos[1].PendingSum.Loads[statistics.RegionReadBytes], -1.2*units.MiB)) - re.True(nearlyAbout(stInfos[3].PendingSum.Loads[statistics.RegionReadKeys], 0.7*units.MiB)) - re.True(nearlyAbout(stInfos[3].PendingSum.Loads[statistics.RegionReadBytes], 0.7*units.MiB)) + re.True(nearlyAbout(stInfos[1].PendingSum.Loads[utils.RegionWriteKeys], -1.2*units.MiB)) + re.True(nearlyAbout(stInfos[1].PendingSum.Loads[utils.RegionWriteBytes], -1.2*units.MiB)) + re.True(nearlyAbout(stInfos[3].PendingSum.Loads[utils.RegionWriteKeys], 0.7*units.MiB)) + re.True(nearlyAbout(stInfos[3].PendingSum.Loads[utils.RegionWriteBytes], 0.7*units.MiB)) + re.True(nearlyAbout(stInfos[1].PendingSum.Loads[utils.RegionReadKeys], -1.2*units.MiB)) + re.True(nearlyAbout(stInfos[1].PendingSum.Loads[utils.RegionReadBytes], -1.2*units.MiB)) + re.True(nearlyAbout(stInfos[3].PendingSum.Loads[utils.RegionReadKeys], 0.7*units.MiB)) + re.True(nearlyAbout(stInfos[3].PendingSum.Loads[utils.RegionReadBytes], 0.7*units.MiB)) } func nearlyAbout(f1, f2 float64) bool { @@ -2092,7 +2093,7 @@ func nearlyAbout(f1, f2 float64) bool { } func loadsEqual(loads1, loads2 []float64) bool { - if len(loads1) != statistics.DimLen || len(loads2) != statistics.DimLen { + if len(loads1) != utils.DimLen || len(loads2) != utils.DimLen { return false } for i, load := range loads1 { @@ -2120,10 +2121,10 @@ func checkHotReadPeerSchedule(re *require.Assertions, enablePlacementRules bool) tc.PutStoreWithLabels(id) } - sche, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) + sche, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), ConfigJSONDecoder([]byte("null"))) re.NoError(err) hb := sche.(*hotScheduler) - hb.conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} tc.UpdateStorageReadStats(1, 20*units.MiB, 20*units.MiB) tc.UpdateStorageReadStats(2, 19*units.MiB, 19*units.MiB) @@ -2142,7 +2143,7 @@ func TestHotScheduleWithPriority(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetDstToleranceRatio(1.05) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1.05) @@ -2161,61 +2162,61 @@ func TestHotScheduleWithPriority(t *testing.T) { tc.AddRegionStore(4, 20) tc.AddRegionStore(5, 20) - 
tc.UpdateStorageWrittenStats(1, 10*units.MiB*statistics.StoreHeartBeatReportInterval, 9*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 9*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(5, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(1, 10*units.MiB*utils.StoreHeartBeatReportInterval, 9*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 6*units.MiB*utils.StoreHeartBeatReportInterval, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 6*units.MiB*utils.StoreHeartBeatReportInterval, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 9*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(5, 1*units.MiB*utils.StoreHeartBeatReportInterval, 1*units.MiB*utils.StoreHeartBeatReportInterval) // must transfer peer schedulePeerPr = 1.0 - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 2 * units.MiB, 1 * units.MiB, 0}, {6, []uint64{4, 2, 3}, 1 * units.MiB, 2 * units.MiB, 0}, }) - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.BytePriority, utils.KeyPriority} ops, _ := hb.Schedule(tc, false) re.Len(ops, 1) operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 5) clearPendingInfluence(hb.(*hotScheduler)) - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.KeyPriority, utils.BytePriority} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 4, 5) clearPendingInfluence(hb.(*hotScheduler)) // assert read priority schedule - hb, err = CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err = CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) - tc.UpdateStorageReadStats(5, 10*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(4, 10*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(1, 10*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(2, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 7*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(3, 7*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + tc.UpdateStorageReadStats(5, 10*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(4, 10*units.MiB*utils.StoreHeartBeatReportInterval, 
10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(1, 10*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(2, 1*units.MiB*utils.StoreHeartBeatReportInterval, 7*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(3, 7*units.MiB*utils.StoreHeartBeatReportInterval, 1*units.MiB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 2 * units.MiB, 2 * units.MiB, 0}, }) - hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) clearPendingInfluence(hb.(*hotScheduler)) - hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + hb.(*hotScheduler).conf.ReadPriorities = []string{utils.KeyPriority, utils.BytePriority} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3) - hb, err = CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + hb, err = CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.RankFormulaVersion = "v1" re.NoError(err) // assert loose store picking - tc.UpdateStorageWrittenStats(1, 10*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 6.1*units.MiB*statistics.StoreHeartBeatReportInterval, 6.1*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(5, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval) - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + tc.UpdateStorageWrittenStats(1, 10*units.MiB*utils.StoreHeartBeatReportInterval, 1*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 6.1*units.MiB*utils.StoreHeartBeatReportInterval, 6.1*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 6*units.MiB*utils.StoreHeartBeatReportInterval, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 6*units.MiB*utils.StoreHeartBeatReportInterval, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(5, 1*units.MiB*utils.StoreHeartBeatReportInterval, 1*units.MiB*utils.StoreHeartBeatReportInterval) + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.BytePriority, utils.KeyPriority} hb.(*hotScheduler).conf.StrictPickingStore = true ops, _ = hb.Schedule(tc, false) re.Empty(ops) @@ -2225,12 +2226,12 @@ func TestHotScheduleWithPriority(t *testing.T) { operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 5) clearPendingInfluence(hb.(*hotScheduler)) 
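In TestHotScheduleWithPriority above, flipping the priority order flips the chosen peer: byte-first ReadPriorities move the hot leader from store 1 to store 2 (the store with the least read bytes), while key-first priorities move it to store 3 (the least read keys). An illustrative sketch of that selection rule over plain maps; pickTarget is hypothetical, not the scheduler's API:

package main

import "fmt"

// pickTarget chooses the candidate store with the lowest load on the
// first-priority dimension (illustration only).
func pickTarget(loads map[uint64]map[string]float64, firstPriority string, candidates []uint64) uint64 {
    var best uint64
    bestLoad := 0.0
    for i, id := range candidates {
        if i == 0 || loads[id][firstPriority] < bestLoad {
            best, bestLoad = id, loads[id][firstPriority]
        }
    }
    return best
}

func main() {
    // Read loads in MiB/s mirroring the test setup: store 2 is byte-cold, store 3 is key-cold.
    loads := map[uint64]map[string]float64{
        2: {"byte": 1, "key": 7},
        3: {"byte": 7, "key": 1},
    }
    fmt.Println(pickTarget(loads, "byte", []uint64{2, 3})) // 2, as CheckTransferLeader(..., 1, 2) expects
    fmt.Println(pickTarget(loads, "key", []uint64{2, 3}))  // 3, as CheckTransferLeader(..., 1, 3) expects
}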
- tc.UpdateStorageWrittenStats(1, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 6.1*units.MiB*statistics.StoreHeartBeatReportInterval, 6.1*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(5, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval) - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + tc.UpdateStorageWrittenStats(1, 6*units.MiB*utils.StoreHeartBeatReportInterval, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 6.1*units.MiB*utils.StoreHeartBeatReportInterval, 6.1*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 6*units.MiB*utils.StoreHeartBeatReportInterval, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 1*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(5, 1*units.MiB*utils.StoreHeartBeatReportInterval, 1*units.MiB*utils.StoreHeartBeatReportInterval) + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.StrictPickingStore = true ops, _ = hb.Schedule(tc, false) re.Empty(ops) @@ -2248,7 +2249,7 @@ func TestHotScheduleWithStddev(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetDstToleranceRatio(1.0) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1.0) @@ -2263,15 +2264,15 @@ func TestHotScheduleWithStddev(t *testing.T) { hb.(*hotScheduler).conf.StrictPickingStore = false // skip uniform cluster - tc.UpdateStorageWrittenStats(1, 5*units.MiB*statistics.StoreHeartBeatReportInterval, 5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 5.3*units.MiB*statistics.StoreHeartBeatReportInterval, 5.3*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 5*units.MiB*statistics.StoreHeartBeatReportInterval, 5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 5*units.MiB*statistics.StoreHeartBeatReportInterval, 5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(5, 4.8*units.MiB*statistics.StoreHeartBeatReportInterval, 4.8*units.MiB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + tc.UpdateStorageWrittenStats(1, 5*units.MiB*utils.StoreHeartBeatReportInterval, 5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 5.3*units.MiB*utils.StoreHeartBeatReportInterval, 5.3*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 5*units.MiB*utils.StoreHeartBeatReportInterval, 5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 5*units.MiB*utils.StoreHeartBeatReportInterval, 5*units.MiB*utils.StoreHeartBeatReportInterval) + 
tc.UpdateStorageWrittenStats(5, 4.8*units.MiB*utils.StoreHeartBeatReportInterval, 4.8*units.MiB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Write, []testRegionInfo{ {6, []uint64{3, 4, 2}, 0.1 * units.MiB, 0.1 * units.MiB, 0}, }) - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.BytePriority, utils.KeyPriority} stddevThreshold = 0.1 ops, _ := hb.Schedule(tc, false) re.Empty(ops) @@ -2282,15 +2283,15 @@ func TestHotScheduleWithStddev(t *testing.T) { clearPendingInfluence(hb.(*hotScheduler)) // skip -1 case (uniform cluster) - tc.UpdateStorageWrittenStats(1, 5*units.MiB*statistics.StoreHeartBeatReportInterval, 100*units.MiB*statistics.StoreHeartBeatReportInterval) // two dims are not uniform. - tc.UpdateStorageWrittenStats(2, 5.3*units.MiB*statistics.StoreHeartBeatReportInterval, 4.8*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 5*units.MiB*statistics.StoreHeartBeatReportInterval, 5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 5*units.MiB*statistics.StoreHeartBeatReportInterval, 5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(5, 4.8*units.MiB*statistics.StoreHeartBeatReportInterval, 5*units.MiB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + tc.UpdateStorageWrittenStats(1, 5*units.MiB*utils.StoreHeartBeatReportInterval, 100*units.MiB*utils.StoreHeartBeatReportInterval) // two dims are not uniform. + tc.UpdateStorageWrittenStats(2, 5.3*units.MiB*utils.StoreHeartBeatReportInterval, 4.8*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 5*units.MiB*utils.StoreHeartBeatReportInterval, 5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 5*units.MiB*utils.StoreHeartBeatReportInterval, 5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(5, 4.8*units.MiB*utils.StoreHeartBeatReportInterval, 5*units.MiB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Write, []testRegionInfo{ {6, []uint64{3, 4, 2}, 0.1 * units.MiB, 0.1 * units.MiB, 0}, }) - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.BytePriority, utils.KeyPriority} stddevThreshold = 0.1 ops, _ = hb.Schedule(tc, false) re.Empty(ops) @@ -2308,7 +2309,7 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) @@ -2318,11 +2319,11 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) { tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) - tc.UpdateStorageWrittenStats(1, 31*units.MiB*statistics.StoreHeartBeatReportInterval, 31*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 10*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 
10*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(1, 31*units.MiB*utils.StoreHeartBeatReportInterval, 31*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 10*units.MiB*utils.StoreHeartBeatReportInterval, 1*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 1*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {1, []uint64{1, 2, 3}, 10 * units.MiB, 10 * units.MiB, 0}, {2, []uint64{1, 2, 3}, 10 * units.MiB, 10 * units.MiB, 0}, {3, []uint64{1, 2, 3}, 10 * units.MiB, 10 * units.MiB, 0}, @@ -2334,11 +2335,11 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) { defer func() { schedulePeerPr, pendingAmpFactor = old1, old2 }() - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} ops, _ := hb.Schedule(tc, false) re.Len(ops, 1) operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2) - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.BytePriority, utils.KeyPriority} ops, _ = hb.Schedule(tc, false) re.Len(ops, 1) operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3) @@ -2351,64 +2352,64 @@ func TestCompatibility(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) // default checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.QueryDim, statistics.ByteDim}, - {statistics.QueryDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, }) // config error value hb.(*hotScheduler).conf.ReadPriorities = []string{"error"} - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{"error", statistics.BytePriority} - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{"error", utils.BytePriority} + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.QueryPriority, utils.BytePriority, utils.KeyPriority} checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.QueryDim, statistics.ByteDim}, - {statistics.QueryDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, }) // low version tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version5_0)) checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.ByteDim, statistics.KeyDim}, - {statistics.KeyDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.ByteDim, utils.KeyDim}, + {utils.KeyDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, }) // config byte and key - hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.KeyPriority, statistics.BytePriority} - hb.(*hotScheduler).conf.WriteLeaderPriorities = 
[]string{statistics.BytePriority, statistics.KeyPriority} - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority} + hb.(*hotScheduler).conf.ReadPriorities = []string{utils.KeyPriority, utils.BytePriority} + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.BytePriority, utils.KeyPriority} + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.KeyPriority, utils.BytePriority} checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.KeyDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, - {statistics.KeyDim, statistics.ByteDim}, + {utils.KeyDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, + {utils.KeyDim, utils.ByteDim}, }) // config query in low version - hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.QueryPriority, statistics.BytePriority} - hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.QueryPriority, statistics.BytePriority} - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority} + hb.(*hotScheduler).conf.ReadPriorities = []string{utils.QueryPriority, utils.BytePriority} + hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.QueryPriority, utils.BytePriority} + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.QueryPriority, utils.BytePriority} checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.ByteDim, statistics.KeyDim}, - {statistics.KeyDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.ByteDim, utils.KeyDim}, + {utils.KeyDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, }) // config error value hb.(*hotScheduler).conf.ReadPriorities = []string{"error", "error"} hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{} - hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.WritePeerPriorities = []string{utils.QueryPriority, utils.BytePriority, utils.KeyPriority} checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.ByteDim, statistics.KeyDim}, - {statistics.KeyDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.ByteDim, utils.KeyDim}, + {utils.KeyDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, }) // test version change tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.HotScheduleWithQuery)) re.False(hb.(*hotScheduler).conf.lastQuerySupported) // it will updated after scheduling checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.QueryDim, statistics.ByteDim}, - {statistics.QueryDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, }) re.True(hb.(*hotScheduler).conf.lastQuerySupported) } @@ -2422,9 +2423,9 @@ func TestCompatibilityConfig(t *testing.T) { hb, err := CreateScheduler(HotRegionType, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder("hot-region", nil)) re.NoError(err) checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.QueryDim, statistics.ByteDim}, - {statistics.QueryDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, }) // Config file is not currently supported @@ -2432,9 +2433,9 @@ func TestCompatibilityConfig(t *testing.T) { ConfigSliceDecoder("hot-region", 
[]string{"read-priorities=byte,query"})) re.NoError(err) checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.QueryDim, statistics.ByteDim}, - {statistics.QueryDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.QueryDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, }) // from 4.0 or 5.0 or 5.1 cluster @@ -2459,9 +2460,9 @@ func TestCompatibilityConfig(t *testing.T) { hb, err = CreateScheduler(HotRegionType, oc, storage, ConfigJSONDecoder(data)) re.NoError(err) checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.ByteDim, statistics.KeyDim}, - {statistics.KeyDim, statistics.ByteDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.ByteDim, utils.KeyDim}, + {utils.KeyDim, utils.ByteDim}, + {utils.ByteDim, utils.KeyDim}, }) // From configured cluster @@ -2475,16 +2476,16 @@ func TestCompatibilityConfig(t *testing.T) { hb, err = CreateScheduler(HotRegionType, oc, storage, ConfigJSONDecoder(data)) re.NoError(err) checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{ - {statistics.KeyDim, statistics.QueryDim}, - {statistics.QueryDim, statistics.KeyDim}, - {statistics.ByteDim, statistics.KeyDim}, + {utils.KeyDim, utils.QueryDim}, + {utils.QueryDim, utils.KeyDim}, + {utils.ByteDim, utils.KeyDim}, }) } func checkPriority(re *require.Assertions, hb *hotScheduler, tc *mockcluster.Cluster, dims [3][2]int) { - readSolver := newBalanceSolver(hb, tc, statistics.Read, transferLeader) - writeLeaderSolver := newBalanceSolver(hb, tc, statistics.Write, transferLeader) - writePeerSolver := newBalanceSolver(hb, tc, statistics.Write, movePeer) + readSolver := newBalanceSolver(hb, tc, utils.Read, transferLeader) + writeLeaderSolver := newBalanceSolver(hb, tc, utils.Write, transferLeader) + writePeerSolver := newBalanceSolver(hb, tc, utils.Write, movePeer) re.Equal(dims[0][0], readSolver.firstPriority) re.Equal(dims[0][1], readSolver.secondPriority) re.Equal(dims[1][0], writeLeaderSolver.firstPriority) @@ -2547,20 +2548,20 @@ func TestConfigValidation(t *testing.T) { // forbid-rw-type // default hc = initHotRegionScheduleConfig() - re.False(hc.IsForbidRWType(statistics.Read)) - re.False(hc.IsForbidRWType(statistics.Write)) + re.False(hc.IsForbidRWType(utils.Read)) + re.False(hc.IsForbidRWType(utils.Write)) // read hc.ForbidRWType = "read" err = hc.valid() re.NoError(err) - re.True(hc.IsForbidRWType(statistics.Read)) - re.False(hc.IsForbidRWType(statistics.Write)) + re.True(hc.IsForbidRWType(utils.Read)) + re.False(hc.IsForbidRWType(utils.Write)) // write hc.ForbidRWType = "write" err = hc.valid() re.NoError(err) - re.False(hc.IsForbidRWType(statistics.Read)) - re.True(hc.IsForbidRWType(statistics.Write)) + re.False(hc.IsForbidRWType(utils.Read)) + re.True(hc.IsForbidRWType(utils.Write)) // illegal hc.ForbidRWType = "test" err = hc.valid() @@ -2592,30 +2593,30 @@ func TestMaxZombieDuration(t *testing.T) { testCases := []maxZombieDurTestCase{ { typ: readPeer, - maxZombieDur: maxZombieDur * statistics.StoreHeartBeatReportInterval, + maxZombieDur: maxZombieDur * utils.StoreHeartBeatReportInterval, }, { typ: readLeader, - maxZombieDur: maxZombieDur * statistics.StoreHeartBeatReportInterval, + maxZombieDur: maxZombieDur * utils.StoreHeartBeatReportInterval, }, { typ: writePeer, - maxZombieDur: maxZombieDur * statistics.StoreHeartBeatReportInterval, + maxZombieDur: maxZombieDur * utils.StoreHeartBeatReportInterval, }, { typ: writePeer, isTiFlash: true, - maxZombieDur: maxZombieDur * statistics.RegionHeartBeatReportInterval, + 
maxZombieDur: maxZombieDur * utils.RegionHeartBeatReportInterval, }, { typ: writeLeader, - firstPriority: statistics.KeyDim, - maxZombieDur: maxZombieDur * statistics.RegionHeartBeatReportInterval, + firstPriority: utils.KeyDim, + maxZombieDur: maxZombieDur * utils.RegionHeartBeatReportInterval, }, { typ: writeLeader, - firstPriority: statistics.QueryDim, - maxZombieDur: maxZombieDur * statistics.StoreHeartBeatReportInterval, + firstPriority: utils.QueryDim, + maxZombieDur: maxZombieDur * utils.StoreHeartBeatReportInterval, }, } for _, testCase := range testCases { @@ -2924,8 +2925,8 @@ func TestExpect(t *testing.T) { } bs := &balanceSolver{ sche: hb.(*hotScheduler), - firstPriority: statistics.KeyDim, - secondPriority: statistics.ByteDim, + firstPriority: utils.KeyDim, + secondPriority: utils.ByteDim, resourceTy: testCase.rs, } bs.sche.conf.StrictPickingStore = testCase.strict @@ -2954,32 +2955,32 @@ func TestBucketFirstStat(t *testing.T) { testdata := []struct { firstPriority int secondPriority int - rwTy statistics.RWType - expect statistics.RegionStatKind + rwTy utils.RWType + expect utils.RegionStatKind }{ { - firstPriority: statistics.KeyDim, - secondPriority: statistics.ByteDim, - rwTy: statistics.Write, - expect: statistics.RegionWriteKeys, + firstPriority: utils.KeyDim, + secondPriority: utils.ByteDim, + rwTy: utils.Write, + expect: utils.RegionWriteKeys, }, { - firstPriority: statistics.QueryDim, - secondPriority: statistics.ByteDim, - rwTy: statistics.Write, - expect: statistics.RegionWriteBytes, + firstPriority: utils.QueryDim, + secondPriority: utils.ByteDim, + rwTy: utils.Write, + expect: utils.RegionWriteBytes, }, { - firstPriority: statistics.KeyDim, - secondPriority: statistics.ByteDim, - rwTy: statistics.Read, - expect: statistics.RegionReadKeys, + firstPriority: utils.KeyDim, + secondPriority: utils.ByteDim, + rwTy: utils.Read, + expect: utils.RegionReadKeys, }, { - firstPriority: statistics.QueryDim, - secondPriority: statistics.ByteDim, - rwTy: statistics.Read, - expect: statistics.RegionReadBytes, + firstPriority: utils.QueryDim, + secondPriority: utils.ByteDim, + rwTy: utils.Read, + expect: utils.RegionReadBytes, }, } for _, data := range testdata { diff --git a/pkg/schedule/schedulers/hot_region_v2.go b/pkg/schedule/schedulers/hot_region_v2.go index c81225d959e..c0c4a1c1b9a 100644 --- a/pkg/schedule/schedulers/hot_region_v2.go +++ b/pkg/schedule/schedulers/hot_region_v2.go @@ -20,7 +20,7 @@ import ( "fmt" "math" - "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" ) const ( @@ -105,11 +105,11 @@ func (bs *balanceSolver) filterUniformStoreV2() (string, bool) { } if isUniformFirstPriority && (bs.cur.progressiveRank == -2 || bs.cur.progressiveRank == -3) { // If first priority dim is enough uniform, -2 is unnecessary and maybe lead to worse balance for second priority dim - return statistics.DimToString(bs.firstPriority), true + return utils.DimToString(bs.firstPriority), true } if isUniformSecondPriority && bs.cur.progressiveRank == -1 { // If second priority dim is enough uniform, -1 is unnecessary and maybe lead to worse balance for first priority dim - return statistics.DimToString(bs.secondPriority), true + return utils.DimToString(bs.secondPriority), true } return "", false } @@ -374,11 +374,11 @@ func (bs *balanceSolver) rankToDimStringV2() string { case -4: return "all" case -3: - return statistics.DimToString(bs.firstPriority) + return utils.DimToString(bs.firstPriority) case -2: - return statistics.DimToString(bs.firstPriority) + 
"-only" + return utils.DimToString(bs.firstPriority) + "-only" case -1: - return statistics.DimToString(bs.secondPriority) + return utils.DimToString(bs.secondPriority) default: return "none" } diff --git a/pkg/schedule/schedulers/hot_region_v2_test.go b/pkg/schedule/schedulers/hot_region_v2_test.go index 87d4950b611..a8e2eeb9d50 100644 --- a/pkg/schedule/schedulers/hot_region_v2_test.go +++ b/pkg/schedule/schedulers/hot_region_v2_test.go @@ -22,6 +22,7 @@ import ( "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/versioninfo" @@ -35,7 +36,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { statistics.Denoising = false statisticsInterval = 0 - sche, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) hb.conf.SetDstToleranceRatio(0.0) @@ -48,14 +49,14 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { tc.AddRegionStore(3, 20) tc.AddRegionStore(4, 20) tc.AddRegionStore(5, 20) - hb.conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority} - - tc.UpdateStorageWrittenStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 16*units.MiB*statistics.StoreHeartBeatReportInterval, 20*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(5, 14*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + hb.conf.WritePeerPriorities = []string{utils.BytePriority, utils.KeyPriority} + + tc.UpdateStorageWrittenStats(1, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 16*units.MiB*utils.StoreHeartBeatReportInterval, 20*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(5, 14*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Write, []testRegionInfo{ {6, []uint64{3, 2, 4}, 2 * units.MiB, 3 * units.MiB, 0}, {7, []uint64{1, 4, 5}, 2 * units.MiB, 0.1 * units.MiB, 0}, }) @@ -80,7 +81,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { re.True(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) // When there is a better solution, there will only be one operator. 
- addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {8, []uint64{3, 2, 4}, 0.5 * units.MiB, 3 * units.MiB, 0}, }) ops, _ = hb.Schedule(tc, false) @@ -97,7 +98,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { defer cancel() statistics.Denoising = false - sche, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) hb.conf.SetDstToleranceRatio(0.0) @@ -110,14 +111,14 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { tc.AddRegionStore(3, 20) tc.AddRegionStore(4, 20) tc.AddRegionStore(5, 20) - hb.conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority} - - tc.UpdateStorageWrittenStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 20*units.MiB*statistics.StoreHeartBeatReportInterval, 14*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(5, 10*units.MiB*statistics.StoreHeartBeatReportInterval, 16*units.MiB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + hb.conf.WritePeerPriorities = []string{utils.BytePriority, utils.KeyPriority} + + tc.UpdateStorageWrittenStats(1, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 20*units.MiB*utils.StoreHeartBeatReportInterval, 14*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(5, 10*units.MiB*utils.StoreHeartBeatReportInterval, 16*units.MiB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Write, []testRegionInfo{ {6, []uint64{3, 2, 4}, 3 * units.MiB, 1.8 * units.MiB, 0}, {7, []uint64{1, 4, 5}, 0.1 * units.MiB, 2 * units.MiB, 0}, }) @@ -151,7 +152,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sche, err := CreateScheduler(statistics.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) hb.conf.SetDstToleranceRatio(0.0) @@ -164,14 +165,14 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { tc.AddRegionStore(3, 20) tc.AddRegionStore(4, 20) tc.AddRegionStore(5, 20) - hb.conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority} - - tc.UpdateStorageWrittenStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(2, 20*units.MiB*statistics.StoreHeartBeatReportInterval, 
14*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(3, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(4, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 16*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenStats(5, 10*units.MiB*statistics.StoreHeartBeatReportInterval, 18*units.MiB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + hb.conf.WritePeerPriorities = []string{utils.BytePriority, utils.KeyPriority} + + tc.UpdateStorageWrittenStats(1, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(2, 20*units.MiB*utils.StoreHeartBeatReportInterval, 14*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(3, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(4, 15*units.MiB*utils.StoreHeartBeatReportInterval, 16*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenStats(5, 10*units.MiB*utils.StoreHeartBeatReportInterval, 18*units.MiB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Write, []testRegionInfo{ {6, []uint64{3, 2, 4}, 3 * units.MiB, 3 * units.MiB, 0}, {7, []uint64{1, 4, 5}, 0.1 * units.MiB, 0.1 * units.MiB, 0}, }) @@ -195,7 +196,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { re.True(hb.searchRevertRegions[writePeer]) clearPendingInfluence(hb) // Two operators can be generated when there is a better solution - addRegionInfo(tc, statistics.Write, []testRegionInfo{ + addRegionInfo(tc, utils.Write, []testRegionInfo{ {8, []uint64{1, 4, 5}, 0.1 * units.MiB, 3 * units.MiB, 0}, }) ops, _ = hb.Schedule(tc, false) @@ -214,7 +215,7 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - sche, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) hb.conf.SetDstToleranceRatio(0.0) @@ -227,14 +228,14 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { tc.AddRegionStore(3, 20) tc.AddRegionStore(4, 20) tc.AddRegionStore(5, 20) - hb.conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority} - - tc.UpdateStorageReadStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(2, 16*units.MiB*statistics.StoreHeartBeatReportInterval, 20*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(3, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(4, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(5, 14*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + hb.conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} + + tc.UpdateStorageReadStats(1, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + 
tc.UpdateStorageReadStats(2, 16*units.MiB*utils.StoreHeartBeatReportInterval, 20*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(3, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(4, 15*units.MiB*utils.StoreHeartBeatReportInterval, 15*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(5, 14*units.MiB*utils.StoreHeartBeatReportInterval, 10*units.MiB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Read, []testRegionInfo{ {6, []uint64{2, 1, 5}, 2 * units.MiB, 3 * units.MiB, 0}, {7, []uint64{5, 4, 2}, 2 * units.MiB, 0.1 * units.MiB, 0}, }) @@ -259,7 +260,7 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { re.True(hb.searchRevertRegions[readLeader]) clearPendingInfluence(hb) // When there is a better solution, there will only be one operator. - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + addRegionInfo(tc, utils.Read, []testRegionInfo{ {8, []uint64{2, 1, 5}, 0.5 * units.MiB, 3 * units.MiB, 0}, }) ops, _ = hb.Schedule(tc, false) @@ -276,12 +277,12 @@ func TestSkipUniformStore(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - hb, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetRankFormulaVersion("v2") - hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority} + hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -290,10 +291,10 @@ func TestSkipUniformStore(t *testing.T) { tc.AddRegionStore(5, 20) // Case1: two dim are both enough uniform - tc.UpdateStorageReadStats(1, 10.05*units.MB*statistics.StoreHeartBeatReportInterval, 10.05*units.MB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(2, 9.15*units.MB*statistics.StoreHeartBeatReportInterval, 9.15*units.MB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(3, 10.0*units.MB*statistics.StoreHeartBeatReportInterval, 10.0*units.MB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + tc.UpdateStorageReadStats(1, 10.05*units.MB*utils.StoreHeartBeatReportInterval, 10.05*units.MB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(2, 9.15*units.MB*utils.StoreHeartBeatReportInterval, 9.15*units.MB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(3, 10.0*units.MB*utils.StoreHeartBeatReportInterval, 10.0*units.MB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0.3 * units.MB, 0.3 * units.MB, 0}, }) // when there is no uniform store filter, still schedule although the cluster is enough uniform @@ -309,10 +310,10 @@ func TestSkipUniformStore(t *testing.T) { clearPendingInfluence(hb.(*hotScheduler)) // Case2: the first dim is enough uniform, we should schedule the second dim - tc.UpdateStorageReadStats(1, 10.15*units.MB*statistics.StoreHeartBeatReportInterval, 10.05*units.MB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(2, 9.25*units.MB*statistics.StoreHeartBeatReportInterval, 
9.85*units.MB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(3, 9.85*units.MB*statistics.StoreHeartBeatReportInterval, 16.0*units.MB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + tc.UpdateStorageReadStats(1, 10.15*units.MB*utils.StoreHeartBeatReportInterval, 10.05*units.MB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(2, 9.25*units.MB*utils.StoreHeartBeatReportInterval, 9.85*units.MB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(3, 9.85*units.MB*utils.StoreHeartBeatReportInterval, 16.0*units.MB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0.3 * units.MB, 0.3 * units.MB, 0}, {2, []uint64{3, 2, 1}, 0.3 * units.MB, 2 * units.MB, 0}, }) @@ -330,10 +331,10 @@ func TestSkipUniformStore(t *testing.T) { clearPendingInfluence(hb.(*hotScheduler)) // Case3: the second dim is enough uniform, we should schedule the first dim, although its rank is higher than the second dim - tc.UpdateStorageReadStats(1, 10.05*units.MB*statistics.StoreHeartBeatReportInterval, 10.05*units.MB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(2, 9.85*units.MB*statistics.StoreHeartBeatReportInterval, 9.45*units.MB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(3, 16*units.MB*statistics.StoreHeartBeatReportInterval, 9.85*units.MB*statistics.StoreHeartBeatReportInterval) - addRegionInfo(tc, statistics.Read, []testRegionInfo{ + tc.UpdateStorageReadStats(1, 10.05*units.MB*utils.StoreHeartBeatReportInterval, 10.05*units.MB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(2, 9.85*units.MB*utils.StoreHeartBeatReportInterval, 9.45*units.MB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(3, 16*units.MB*utils.StoreHeartBeatReportInterval, 9.85*units.MB*utils.StoreHeartBeatReportInterval) + addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0.3 * units.MB, 0.3 * units.MB, 0}, {2, []uint64{3, 2, 1}, 2 * units.MB, 0.3 * units.MB, 0}, }) @@ -361,8 +362,8 @@ func TestHotReadRegionScheduleWithSmallHotRegion(t *testing.T) { re := require.New(t) emptyFunc := func(*mockcluster.Cluster, *hotScheduler) {} highLoad, lowLoad := uint64(2000), uint64(200) - bigHotRegionByte := uint64(float64(lowLoad) * firstPriorityMinHotRatio * 10 * units.MiB * statistics.ReadReportInterval) - bigHotRegionQuery := uint64(float64(lowLoad) * firstPriorityMinHotRatio * 10 * statistics.ReadReportInterval) + bigHotRegionByte := uint64(float64(lowLoad) * firstPriorityMinHotRatio * 10 * units.MiB * utils.StoreHeartBeatReportInterval) + bigHotRegionQuery := uint64(float64(lowLoad) * firstPriorityMinHotRatio * 10 * utils.StoreHeartBeatReportInterval) // Case1: Before #6827, we only use minHotRatio, so cannot schedule small hot region in this case. // Because 10000 is larger than the length of hotRegions, so `filterHotPeers` will skip the topn calculation. @@ -381,7 +382,7 @@ func TestHotReadRegionScheduleWithSmallHotRegion(t *testing.T) { // Case3: If there is larger hot region, we will schedule it. 
hotRegionID := uint64(100) ops = checkHotReadRegionScheduleWithSmallHotRegion(re, highLoad, lowLoad, func(tc *mockcluster.Cluster, _ *hotScheduler) { - tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{2, 3}) + tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) }) re.Len(ops, 1) re.Equal(hotRegionID, ops[0].RegionID()) @@ -389,15 +390,15 @@ func TestHotReadRegionScheduleWithSmallHotRegion(t *testing.T) { // Case4: If there is larger hot region, but it need to cool down, we will schedule small hot region. ops = checkHotReadRegionScheduleWithSmallHotRegion(re, highLoad, lowLoad, func(tc *mockcluster.Cluster, _ *hotScheduler) { // just transfer leader - tc.AddRegionWithReadInfo(hotRegionID, 2, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{1, 3}) - tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{2, 3}) + tc.AddRegionWithReadInfo(hotRegionID, 2, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{1, 3}) + tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) }) re.Len(ops, 1) re.NotEqual(hotRegionID, ops[0].RegionID()) // Case5: If there is larger hot region, but it is pending, we will schedule small hot region. ops = checkHotReadRegionScheduleWithSmallHotRegion(re, highLoad, lowLoad, func(tc *mockcluster.Cluster, hb *hotScheduler) { - tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{2, 3}) + tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) hb.regionPendings[hotRegionID] = &pendingInfluence{} }) re.Len(ops, 1) @@ -408,11 +409,11 @@ func TestHotReadRegionScheduleWithSmallHotRegion(t *testing.T) { topnPosition = 2 ops = checkHotReadRegionScheduleWithSmallHotRegion(re, highLoad, lowLoad, func(tc *mockcluster.Cluster, _ *hotScheduler) { // just transfer leader - tc.AddRegionWithReadInfo(hotRegionID, 2, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{1, 3}) - tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{2, 3}) + tc.AddRegionWithReadInfo(hotRegionID, 2, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{1, 3}) + tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) // just transfer leader - tc.AddRegionWithReadInfo(hotRegionID+1, 2, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{1, 3}) - tc.AddRegionWithReadInfo(hotRegionID+1, 1, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{2, 3}) + tc.AddRegionWithReadInfo(hotRegionID+1, 2, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{1, 3}) + tc.AddRegionWithReadInfo(hotRegionID+1, 1, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) }) re.Len(ops, 0) topnPosition = origin @@ -421,9 +422,9 @@ func TestHotReadRegionScheduleWithSmallHotRegion(t *testing.T) { // we will schedule large hot region rather than small hot region, so there is no operator. 
topnPosition = 2 ops = checkHotReadRegionScheduleWithSmallHotRegion(re, highLoad, lowLoad, func(tc *mockcluster.Cluster, hb *hotScheduler) { - tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{2, 3}) + tc.AddRegionWithReadInfo(hotRegionID, 1, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) hb.regionPendings[hotRegionID] = &pendingInfluence{} - tc.AddRegionWithReadInfo(hotRegionID+1, 1, bigHotRegionByte, 0, bigHotRegionQuery, statistics.ReadReportInterval, []uint64{2, 3}) + tc.AddRegionWithReadInfo(hotRegionID+1, 1, bigHotRegionByte, 0, bigHotRegionQuery, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) hb.regionPendings[hotRegionID+1] = &pendingInfluence{} }) re.Len(ops, 0) @@ -435,24 +436,24 @@ func checkHotReadRegionScheduleWithSmallHotRegion(re *require.Assertions, highLo cancel, _, tc, oc := prepareSchedulersTest() defer cancel() statistics.Denoising = false - sche, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) + sche, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) hb.conf.SetSrcToleranceRatio(1) hb.conf.SetDstToleranceRatio(1) hb.conf.SetRankFormulaVersion("v2") - hb.conf.ReadPriorities = []string{statistics.QueryPriority, statistics.BytePriority} + hb.conf.ReadPriorities = []string{utils.QueryPriority, utils.BytePriority} tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 40) tc.AddRegionStore(2, 10) tc.AddRegionStore(3, 10) - tc.UpdateStorageReadQuery(1, highLoad*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadQuery(2, lowLoad*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadQuery(3, (highLoad+lowLoad)/2*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadStats(1, highLoad*units.MiB*statistics.StoreHeartBeatReportInterval, 0) - tc.UpdateStorageReadStats(2, lowLoad*units.MiB*statistics.StoreHeartBeatReportInterval, 0) - tc.UpdateStorageReadStats(3, (highLoad+lowLoad)/2*units.MiB*statistics.StoreHeartBeatReportInterval, 0) + tc.UpdateStorageReadQuery(1, highLoad*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadQuery(2, lowLoad*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadQuery(3, (highLoad+lowLoad)/2*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadStats(1, highLoad*units.MiB*utils.StoreHeartBeatReportInterval, 0) + tc.UpdateStorageReadStats(2, lowLoad*units.MiB*utils.StoreHeartBeatReportInterval, 0) + tc.UpdateStorageReadStats(3, (highLoad+lowLoad)/2*units.MiB*utils.StoreHeartBeatReportInterval, 0) smallHotPeerQuery := float64(lowLoad) * firstPriorityMinHotRatio * 0.9 // it's a small hot region than the firstPriorityMinHotRatio smallHotPeerByte := float64(lowLoad) * secondPriorityMinHotRatio * 0.9 * units.MiB // it's a small hot region than the secondPriorityMinHotRatio @@ -464,7 +465,7 @@ func checkHotReadRegionScheduleWithSmallHotRegion(re *require.Assertions, highLo regions = append(regions, testRegionInfo{uint64(i), []uint64{3, 1, 2}, smallHotPeerByte, 0, smallHotPeerQuery}) } } - addRegionInfo(tc, statistics.Read, regions) + addRegionInfo(tc, utils.Read, regions) tc.SetHotRegionCacheHitsThreshold(1) addOtherRegions(tc, hb) ops, _ := hb.Schedule(tc, false) diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index 54ee9366f45..12ab9f8aa2f 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ 
b/pkg/schedule/schedulers/scheduler_test.go @@ -28,7 +28,7 @@ import ( "github.com/tikv/pd/pkg/schedule/hbstream" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/versioninfo" @@ -167,10 +167,10 @@ func checkBalance(re *require.Assertions, enablePlacementRules bool) { tc.AddLabelsStore(6, 0, map[string]string{"zone": "z4", "host": "h6"}) // Report store written bytes. - tc.UpdateStorageWrittenBytes(1, 7.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(2, 4.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(3, 4.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(4, 6*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(1, 7.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(2, 4.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(3, 4.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(4, 6*units.MiB*utils.StoreHeartBeatReportInterval) tc.UpdateStorageWrittenBytes(5, 0) tc.UpdateStorageWrittenBytes(6, 0) @@ -180,9 +180,9 @@ func checkBalance(re *require.Assertions, enablePlacementRules bool) { // | 1 | 1 | 2 | 3 | 512KB | // | 2 | 1 | 3 | 4 | 512KB | // | 3 | 1 | 2 | 4 | 512KB | - tc.AddLeaderRegionWithWriteInfo(1, 1, 512*units.KiB*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{2, 3}) - tc.AddLeaderRegionWithWriteInfo(2, 1, 512*units.KiB*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{3, 4}) - tc.AddLeaderRegionWithWriteInfo(3, 1, 512*units.KiB*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{2, 4}) + tc.AddLeaderRegionWithWriteInfo(1, 1, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3}) + tc.AddLeaderRegionWithWriteInfo(2, 1, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{3, 4}) + tc.AddLeaderRegionWithWriteInfo(3, 1, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 4}) tc.SetHotRegionCacheHitsThreshold(0) // try to get an operator @@ -203,7 +203,7 @@ func TestHotRegionScheduleAbnormalReplica(t *testing.T) { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetHotRegionScheduleLimit(0) - hb, err := CreateScheduler(statistics.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) + hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) tc.AddRegionStore(1, 3) @@ -211,13 +211,13 @@ func TestHotRegionScheduleAbnormalReplica(t *testing.T) { tc.AddRegionStore(3, 2) // Report store read bytes. 
- tc.UpdateStorageReadBytes(1, 7.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(2, 4.5*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageReadBytes(3, 4.5*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(1, 7.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(2, 4.5*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageReadBytes(3, 4.5*units.MiB*utils.StoreHeartBeatReportInterval) - tc.AddRegionWithReadInfo(1, 1, 512*units.KiB*statistics.ReadReportInterval, 0, 0, statistics.ReadReportInterval, []uint64{2}) - tc.AddRegionWithReadInfo(2, 2, 512*units.KiB*statistics.ReadReportInterval, 0, 0, statistics.ReadReportInterval, []uint64{1, 3}) - tc.AddRegionWithReadInfo(3, 1, 512*units.KiB*statistics.ReadReportInterval, 0, 0, statistics.ReadReportInterval, []uint64{2, 3}) + tc.AddRegionWithReadInfo(1, 1, 512*units.KiB*utils.StoreHeartBeatReportInterval, 0, 0, utils.StoreHeartBeatReportInterval, []uint64{2}) + tc.AddRegionWithReadInfo(2, 2, 512*units.KiB*utils.StoreHeartBeatReportInterval, 0, 0, utils.StoreHeartBeatReportInterval, []uint64{1, 3}) + tc.AddRegionWithReadInfo(3, 1, 512*units.KiB*utils.StoreHeartBeatReportInterval, 0, 0, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) tc.SetHotRegionCacheHitsThreshold(0) re.True(tc.IsRegionHot(tc.GetRegion(1))) re.False(hb.IsScheduleAllowed(tc)) @@ -315,7 +315,7 @@ func TestSpecialUseHotRegion(t *testing.T) { cd := ConfigSliceDecoder(BalanceRegionType, []string{"", ""}) bs, err := CreateScheduler(BalanceRegionType, oc, storage, cd) re.NoError(err) - hs, err := CreateScheduler(statistics.Write.String(), oc, storage, cd) + hs, err := CreateScheduler(utils.Write.String(), oc, storage, cd) re.NoError(err) tc.SetHotRegionCacheHitsThreshold(0) @@ -343,16 +343,16 @@ func TestSpecialUseHotRegion(t *testing.T) { re.Empty(ops) // can only move peer to 4 - tc.UpdateStorageWrittenBytes(1, 60*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(2, 6*units.MiB*statistics.StoreHeartBeatReportInterval) - tc.UpdateStorageWrittenBytes(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(1, 60*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(2, 6*units.MiB*utils.StoreHeartBeatReportInterval) + tc.UpdateStorageWrittenBytes(3, 6*units.MiB*utils.StoreHeartBeatReportInterval) tc.UpdateStorageWrittenBytes(4, 0) tc.UpdateStorageWrittenBytes(5, 0) - tc.AddLeaderRegionWithWriteInfo(1, 1, 512*units.KiB*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{2, 3}) - tc.AddLeaderRegionWithWriteInfo(2, 1, 512*units.KiB*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{2, 3}) - tc.AddLeaderRegionWithWriteInfo(3, 1, 512*units.KiB*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{2, 3}) - tc.AddLeaderRegionWithWriteInfo(4, 2, 512*units.KiB*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{1, 3}) - tc.AddLeaderRegionWithWriteInfo(5, 3, 512*units.KiB*statistics.WriteReportInterval, 0, 0, statistics.WriteReportInterval, []uint64{1, 2}) + tc.AddLeaderRegionWithWriteInfo(1, 1, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3}) + tc.AddLeaderRegionWithWriteInfo(2, 1, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3}) + tc.AddLeaderRegionWithWriteInfo(3, 1, 
512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3}) + tc.AddLeaderRegionWithWriteInfo(4, 2, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{1, 3}) + tc.AddLeaderRegionWithWriteInfo(5, 3, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{1, 2}) ops, _ = hs.Schedule(tc, false) re.Len(ops, 1) operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 4) diff --git a/pkg/schedule/splitter/region_splitter.go b/pkg/schedule/splitter/region_splitter.go index f20346d5b59..f0da8442a2c 100644 --- a/pkg/schedule/splitter/region_splitter.go +++ b/pkg/schedule/splitter/region_splitter.go @@ -56,15 +56,17 @@ func NewSplitRegionsHandler(cluster sche.ClusterInformer, oc *operator.Controlle // RegionSplitter handles split regions type RegionSplitter struct { - cluster sche.ClusterInformer - handler SplitRegionsHandler + cluster sche.ClusterInformer + handler SplitRegionsHandler + addSuspectRegions func(ids ...uint64) } // NewRegionSplitter return a region splitter -func NewRegionSplitter(cluster sche.ClusterInformer, handler SplitRegionsHandler) *RegionSplitter { +func NewRegionSplitter(cluster sche.ClusterInformer, handler SplitRegionsHandler, addSuspectRegions func(ids ...uint64)) *RegionSplitter { return &RegionSplitter{ - cluster: cluster, - handler: handler, + cluster: cluster, + handler: handler, + addSuspectRegions: addSuspectRegions, } } @@ -170,7 +172,7 @@ func (r *RegionSplitter) groupKeysByRegion(keys [][]byte) map[uint64]*regionGrou func (r *RegionSplitter) checkRegionValid(region *core.RegionInfo) bool { if !filter.IsRegionReplicated(r.cluster, region) { - r.cluster.AddSuspectRegions(region.GetID()) + r.addSuspectRegions(region.GetID()) return false } if region.GetLeader() == nil { diff --git a/pkg/schedule/splitter/region_splitter_test.go b/pkg/schedule/splitter/region_splitter_test.go index f293446e6cd..8753d8bf2ec 100644 --- a/pkg/schedule/splitter/region_splitter_test.go +++ b/pkg/schedule/splitter/region_splitter_test.go @@ -86,7 +86,7 @@ func (suite *regionSplitterTestSuite) TestRegionSplitter() { tc := mockcluster.NewCluster(suite.ctx, opt) handler := newMockSplitRegionsHandler() tc.AddLeaderRegionWithRange(1, "eee", "hhh", 2, 3, 4) - splitter := NewRegionSplitter(tc, handler) + splitter := NewRegionSplitter(tc, handler, tc.AddSuspectRegions) newRegions := map[uint64]struct{}{} // assert success failureKeys := splitter.splitRegionsByKeys(suite.ctx, [][]byte{[]byte("fff"), []byte("ggg")}, newRegions) @@ -115,7 +115,7 @@ func (suite *regionSplitterTestSuite) TestGroupKeysByRegion() { tc.AddLeaderRegionWithRange(1, "aaa", "ccc", 2, 3, 4) tc.AddLeaderRegionWithRange(2, "ccc", "eee", 2, 3, 4) tc.AddLeaderRegionWithRange(3, "fff", "ggg", 2, 3, 4) - splitter := NewRegionSplitter(tc, handler) + splitter := NewRegionSplitter(tc, handler, tc.AddSuspectRegions) groupKeys := splitter.groupKeysByRegion([][]byte{ []byte("bbb"), []byte("ddd"), diff --git a/pkg/statistics/buckets/bucket_stat_informer.go b/pkg/statistics/buckets/bucket_stat_informer.go index ba917a864c7..888027f1f5f 100644 --- a/pkg/statistics/buckets/bucket_stat_informer.go +++ b/pkg/statistics/buckets/bucket_stat_informer.go @@ -21,15 +21,15 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/rangetree" "github.com/tikv/pd/pkg/slice" - "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" 
"github.com/tikv/pd/pkg/utils/keyutil" ) -var minHotThresholds [statistics.RegionStatCount]uint64 +var minHotThresholds [utils.RegionStatCount]uint64 func init() { for i := range minHotThresholds { - minHotThresholds[i] = uint64(statistics.MinHotThresholds[i]) + minHotThresholds[i] = uint64(utils.MinHotThresholds[i]) } } @@ -45,7 +45,7 @@ type BucketStat struct { EndKey []byte HotDegree int Interval uint64 - // the order should see statistics.RegionStatKind + // the order should see utils.RegionStatKind Loads []uint64 } diff --git a/pkg/statistics/buckets/hot_bucket_cache.go b/pkg/statistics/buckets/hot_bucket_cache.go index f8b666cf4be..c4ae785bfa4 100644 --- a/pkg/statistics/buckets/hot_bucket_cache.go +++ b/pkg/statistics/buckets/hot_bucket_cache.go @@ -161,6 +161,15 @@ func (h *HotBucketCache) CheckAsync(task flowBucketsItemTask) bool { } } +// BucketsStats returns hot region's buckets stats. +func (h *HotBucketCache) BucketsStats(degree int, regionIDs ...uint64) map[uint64][]*BucketStat { + task := NewCollectBucketStatsTask(degree, regionIDs...) + if !h.CheckAsync(task) { + return nil + } + return task.WaitRet(h.ctx) +} + func (h *HotBucketCache) schedule() { defer logutil.LogPanic() diff --git a/pkg/statistics/collector.go b/pkg/statistics/collector.go index f4785103c89..e64b673803d 100644 --- a/pkg/statistics/collector.go +++ b/pkg/statistics/collector.go @@ -17,6 +17,7 @@ package statistics import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" + "github.com/tikv/pd/pkg/statistics/utils" ) // storeCollector define the behavior of different engines of stores. @@ -26,7 +27,7 @@ type storeCollector interface { // Filter determines whether the Store needs to be handled by itself. Filter(info *StoreSummaryInfo, kind constant.ResourceKind) bool // GetLoads obtains available loads from storeLoads and peerLoadSum according to rwTy and kind. - GetLoads(storeLoads, peerLoadSum []float64, rwTy RWType, kind constant.ResourceKind) (loads []float64) + GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) } type tikvCollector struct{} @@ -52,26 +53,26 @@ func (c tikvCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceKind return false } -func (c tikvCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy RWType, kind constant.ResourceKind) (loads []float64) { - loads = make([]float64, DimLen) +func (c tikvCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { + loads = make([]float64, utils.DimLen) switch rwTy { - case Read: - loads[ByteDim] = storeLoads[StoreReadBytes] - loads[KeyDim] = storeLoads[StoreReadKeys] - loads[QueryDim] = storeLoads[StoreReadQuery] - case Write: + case utils.Read: + loads[utils.ByteDim] = storeLoads[utils.StoreReadBytes] + loads[utils.KeyDim] = storeLoads[utils.StoreReadKeys] + loads[utils.QueryDim] = storeLoads[utils.StoreReadQuery] + case utils.Write: switch kind { case constant.LeaderKind: // Use sum of hot peers to estimate leader-only byte rate. // For Write requests, Write{Bytes, Keys} is applied to all Peers at the same time, // while the Leader and Follower are under different loads (usually the Leader consumes more CPU). // Write{Query} does not require such processing. 
- loads[ByteDim] = peerLoadSum[ByteDim] - loads[KeyDim] = peerLoadSum[KeyDim] - loads[QueryDim] = storeLoads[StoreWriteQuery] + loads[utils.ByteDim] = peerLoadSum[utils.ByteDim] + loads[utils.KeyDim] = peerLoadSum[utils.KeyDim] + loads[utils.QueryDim] = storeLoads[utils.StoreWriteQuery] case constant.RegionKind: - loads[ByteDim] = storeLoads[StoreWriteBytes] - loads[KeyDim] = storeLoads[StoreWriteKeys] + loads[utils.ByteDim] = storeLoads[utils.StoreWriteBytes] + loads[utils.KeyDim] = storeLoads[utils.StoreWriteKeys] // The `Write-peer` does not have `QueryDim` } } @@ -100,12 +101,12 @@ func (c tiflashCollector) Filter(info *StoreSummaryInfo, kind constant.ResourceK return false } -func (c tiflashCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy RWType, kind constant.ResourceKind) (loads []float64) { - loads = make([]float64, DimLen) +func (c tiflashCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy utils.RWType, kind constant.ResourceKind) (loads []float64) { + loads = make([]float64, utils.DimLen) switch rwTy { - case Read: + case utils.Read: // TODO: Need TiFlash StoreHeartbeat support - case Write: + case utils.Write: switch kind { case constant.LeaderKind: // There is no Leader on TiFlash @@ -113,11 +114,11 @@ func (c tiflashCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy RWTyp // TiFlash is currently unable to report statistics in the same unit as Region, // so it uses the sum of Regions. If it is not accurate enough, use sum of hot peer. if c.isTraceRegionFlow { - loads[ByteDim] = storeLoads[StoreRegionsWriteBytes] - loads[KeyDim] = storeLoads[StoreRegionsWriteKeys] + loads[utils.ByteDim] = storeLoads[utils.StoreRegionsWriteBytes] + loads[utils.KeyDim] = storeLoads[utils.StoreRegionsWriteKeys] } else { - loads[ByteDim] = peerLoadSum[ByteDim] - loads[KeyDim] = peerLoadSum[KeyDim] + loads[utils.ByteDim] = peerLoadSum[utils.ByteDim] + loads[utils.KeyDim] = peerLoadSum[utils.KeyDim] } // The `Write-peer` does not have `QueryDim` } diff --git a/pkg/statistics/hot_cache.go b/pkg/statistics/hot_cache.go index 42c1e6c49a7..de7189a1332 100644 --- a/pkg/statistics/hot_cache.go +++ b/pkg/statistics/hot_cache.go @@ -19,14 +19,15 @@ import ( "github.com/smallnest/chanx" "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/logutil" ) const chanMaxLength = 6000000 var ( - readTaskMetrics = hotCacheFlowQueueStatusGauge.WithLabelValues(Read.String()) - writeTaskMetrics = hotCacheFlowQueueStatusGauge.WithLabelValues(Write.String()) + readTaskMetrics = hotCacheFlowQueueStatusGauge.WithLabelValues(utils.Read.String()) + writeTaskMetrics = hotCacheFlowQueueStatusGauge.WithLabelValues(utils.Write.String()) ) // HotCache is a cache hold hot regions. 
@@ -40,8 +41,8 @@ type HotCache struct { func NewHotCache(ctx context.Context) *HotCache { w := &HotCache{ ctx: ctx, - writeCache: NewHotPeerCache(ctx, Write), - readCache: NewHotPeerCache(ctx, Read), + writeCache: NewHotPeerCache(ctx, utils.Write), + readCache: NewHotPeerCache(ctx, utils.Read), } go w.updateItems(w.readCache.taskQueue, w.runReadTask) go w.updateItems(w.writeCache.taskQueue, w.runWriteTask) @@ -75,13 +76,13 @@ func (w *HotCache) CheckReadAsync(task FlowItemTask) bool { } // RegionStats returns hot items according to kind -func (w *HotCache) RegionStats(kind RWType, minHotDegree int) map[uint64][]*HotPeerStat { +func (w *HotCache) RegionStats(kind utils.RWType, minHotDegree int) map[uint64][]*HotPeerStat { task := newCollectRegionStatsTask(minHotDegree) var succ bool switch kind { - case Write: + case utils.Write: succ = w.CheckWriteAsync(task) - case Read: + case utils.Read: succ = w.CheckReadAsync(task) } if !succ { @@ -103,13 +104,13 @@ func (w *HotCache) IsRegionHot(region *core.RegionInfo, minHotDegree int) bool { } // GetHotPeerStat returns hot peer stat with specified regionID and storeID. -func (w *HotCache) GetHotPeerStat(kind RWType, regionID, storeID uint64) *HotPeerStat { +func (w *HotCache) GetHotPeerStat(kind utils.RWType, regionID, storeID uint64) *HotPeerStat { task := newGetHotPeerStatTask(regionID, storeID) var succ bool switch kind { - case Read: + case utils.Read: succ = w.CheckReadAsync(task) - case Write: + case utils.Write: succ = w.CheckWriteAsync(task) } if !succ { @@ -160,11 +161,11 @@ func (w *HotCache) runWriteTask(task FlowItemTask) { // Update updates the cache. // This is used for mockcluster, for test purpose. -func (w *HotCache) Update(item *HotPeerStat, kind RWType) { +func (w *HotCache) Update(item *HotPeerStat, kind utils.RWType) { switch kind { - case Write: + case utils.Write: w.writeCache.updateStat(item) - case Read: + case utils.Read: w.readCache.updateStat(item) } } @@ -195,11 +196,11 @@ func (w *HotCache) ExpiredWriteItems(region *core.RegionInfo) []*HotPeerStat { // GetThresholds returns thresholds. // This is used for test purpose. -func (w *HotCache) GetThresholds(kind RWType, storeID uint64) []float64 { +func (w *HotCache) GetThresholds(kind utils.RWType, storeID uint64) []float64 { switch kind { - case Write: + case utils.Write: return w.writeCache.calcHotThresholds(storeID) - case Read: + case utils.Read: return w.readCache.calcHotThresholds(storeID) } return nil diff --git a/pkg/statistics/hot_peer.go b/pkg/statistics/hot_peer.go index 02c50697358..79757d6e27f 100644 --- a/pkg/statistics/hot_peer.go +++ b/pkg/statistics/hot_peer.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/movingaverage" "github.com/tikv/pd/pkg/slice" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/syncutil" "go.uber.org/zap" ) @@ -34,7 +35,7 @@ type dimStat struct { func newDimStat(reportInterval time.Duration) *dimStat { return &dimStat{ - rolling: movingaverage.NewTimeMedian(DefaultAotSize, rollingWindowsSize, reportInterval), + rolling: movingaverage.NewTimeMedian(utils.DefaultAotSize, rollingWindowsSize, reportInterval), lastIntervalSum: 0, lastDelta: 0, } @@ -103,7 +104,7 @@ type HotPeerStat struct { // stores contains the all peer's storeID in this region. stores []uint64 // actionType is the action type of the region, add, update or remove. - actionType ActionType + actionType utils.ActionType // isLeader is true means that the region has a leader on this store. 
isLeader bool // lastTransferLeaderTime is used to cool down frequent transfer leader. @@ -121,8 +122,8 @@ func (stat *HotPeerStat) ID() uint64 { return stat.RegionID } -// Less compares two HotPeerStat.Implementing TopNItem. -func (stat *HotPeerStat) Less(dim int, than TopNItem) bool { +// Less compares two HotPeerStat. Implementing TopNItem. +func (stat *HotPeerStat) Less(dim int, than utils.TopNItem) bool { return stat.GetLoad(dim) < than.(*HotPeerStat).GetLoad(dim) } @@ -144,7 +145,7 @@ func (stat *HotPeerStat) Log(str string) { } // IsNeedCoolDownTransferLeader use cooldown time after transfer leader to avoid unnecessary schedule -func (stat *HotPeerStat) IsNeedCoolDownTransferLeader(minHotDegree int, rwTy RWType) bool { +func (stat *HotPeerStat) IsNeedCoolDownTransferLeader(minHotDegree int, rwTy utils.RWType) bool { return time.Since(stat.lastTransferLeaderTime).Seconds() < float64(minHotDegree*rwTy.ReportInterval()) } @@ -154,7 +155,7 @@ func (stat *HotPeerStat) IsLeader() bool { } // GetActionType returns the item action type. -func (stat *HotPeerStat) GetActionType() ActionType { +func (stat *HotPeerStat) GetActionType() utils.ActionType { return stat.actionType } @@ -181,8 +182,8 @@ func (stat *HotPeerStat) GetLoads() []float64 { // Clone clones the HotPeerStat. func (stat *HotPeerStat) Clone() *HotPeerStat { ret := *stat - ret.Loads = make([]float64, DimLen) - for i := 0; i < DimLen; i++ { + ret.Loads = make([]float64, utils.DimLen) + for i := 0; i < utils.DimLen; i++ { ret.Loads[i] = stat.GetLoad(i) // replace with denoising loads } ret.rollingLoads = nil diff --git a/pkg/statistics/hot_peer_cache.go b/pkg/statistics/hot_peer_cache.go index 16c64b752e0..1ac07289a3c 100644 --- a/pkg/statistics/hot_peer_cache.go +++ b/pkg/statistics/hot_peer_cache.go @@ -16,15 +16,16 @@ package statistics import ( "context" + "fmt" "math" "time" - "github.com/docker/go-units" "github.com/pingcap/kvproto/pkg/metapb" "github.com/prometheus/client_golang/prometheus" "github.com/smallnest/chanx" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/slice" + "github.com/tikv/pd/pkg/statistics/utils" ) const ( @@ -32,19 +33,12 @@ const ( TopNN = 60 // HotThresholdRatio is used to calculate hot thresholds HotThresholdRatio = 0.8 - // WriteReportInterval indicates the interval between write interval - WriteReportInterval = RegionHeartBeatReportInterval - // ReadReportInterval indicates the interval between read stats report - ReadReportInterval = StoreHeartBeatReportInterval rollingWindowsSize = 5 // HotRegionReportMinInterval is used for the simulator and test HotRegionReportMinInterval = 3 - // HotRegionAntiCount is default value for antiCount - HotRegionAntiCount = 2 - queueCap = 20000 ) @@ -56,47 +50,37 @@ var ThresholdsUpdateInterval = 8 * time.Second // only turn off by the simulator and the test. var Denoising = true -// MinHotThresholds is the threshold at which this dimension is recorded as a hot spot. -var MinHotThresholds = [RegionStatCount]float64{ - RegionReadBytes: 8 * units.KiB, - RegionReadKeys: 128, - RegionReadQueryNum: 128, - RegionWriteBytes: 1 * units.KiB, - RegionWriteKeys: 32, - RegionWriteQueryNum: 32, -} - type thresholds struct { updatedTime time.Time rates []float64 topNLen int - metrics [DimLen + 1]prometheus.Gauge // 0 is for byte, 1 is for key, 2 is for query, 3 is for total length. + metrics [utils.DimLen + 1]prometheus.Gauge // 0 is for byte, 1 is for key, 2 is for query, 3 is for total length. } // hotPeerCache saves the hot peer's statistics. 
type hotPeerCache struct { - kind RWType - peersOfStore map[uint64]*TopN // storeID -> hot peers + kind utils.RWType + peersOfStore map[uint64]*utils.TopN // storeID -> hot peers storesOfRegion map[uint64]map[uint64]struct{} // regionID -> storeIDs regionsOfStore map[uint64]map[uint64]struct{} // storeID -> regionIDs topNTTL time.Duration taskQueue *chanx.UnboundedChan[FlowItemTask] - thresholdsOfStore map[uint64]*thresholds // storeID -> thresholds - metrics map[uint64][ActionTypeLen]prometheus.Gauge // storeID -> metrics + thresholdsOfStore map[uint64]*thresholds // storeID -> thresholds + metrics map[uint64][utils.ActionTypeLen]prometheus.Gauge // storeID -> metrics // TODO: consider to remove store info when store is offline. } // NewHotPeerCache creates a hotPeerCache -func NewHotPeerCache(ctx context.Context, kind RWType) *hotPeerCache { +func NewHotPeerCache(ctx context.Context, kind utils.RWType) *hotPeerCache { return &hotPeerCache{ kind: kind, - peersOfStore: make(map[uint64]*TopN), + peersOfStore: make(map[uint64]*utils.TopN), storesOfRegion: make(map[uint64]map[uint64]struct{}), regionsOfStore: make(map[uint64]map[uint64]struct{}), taskQueue: chanx.NewUnboundedChan[FlowItemTask](ctx, queueCap), thresholdsOfStore: make(map[uint64]*thresholds), topNTTL: time.Duration(3*kind.ReportInterval()) * time.Second, - metrics: make(map[uint64][ActionTypeLen]prometheus.Gauge), + metrics: make(map[uint64][utils.ActionTypeLen]prometheus.Gauge), } } @@ -120,10 +104,10 @@ func (f *hotPeerCache) RegionStats(minHotDegree int) map[uint64][]*HotPeerStat { func (f *hotPeerCache) updateStat(item *HotPeerStat) { switch item.actionType { - case Remove: + case utils.Remove: f.removeItem(item) item.Log("region heartbeat remove from cache") - case Add, Update: + case utils.Add, utils.Update: f.putItem(item) item.Log("region heartbeat update") default: @@ -132,14 +116,14 @@ func (f *hotPeerCache) updateStat(item *HotPeerStat) { f.incMetrics(item.actionType, item.StoreID) } -func (f *hotPeerCache) incMetrics(action ActionType, storeID uint64) { +func (f *hotPeerCache) incMetrics(action utils.ActionType, storeID uint64) { if _, ok := f.metrics[storeID]; !ok { store := storeTag(storeID) kind := f.kind.String() - f.metrics[storeID] = [ActionTypeLen]prometheus.Gauge{ - Add: hotCacheStatusGauge.WithLabelValues("add_item", store, kind), - Remove: hotCacheStatusGauge.WithLabelValues("remove_item", store, kind), - Update: hotCacheStatusGauge.WithLabelValues("update_item", store, kind), + f.metrics[storeID] = [utils.ActionTypeLen]prometheus.Gauge{ + utils.Add: hotCacheStatusGauge.WithLabelValues("add_item", store, kind), + utils.Remove: hotCacheStatusGauge.WithLabelValues("remove_item", store, kind), + utils.Update: hotCacheStatusGauge.WithLabelValues("update_item", store, kind), } } f.metrics[storeID][action].Inc() @@ -153,17 +137,17 @@ func (f *hotPeerCache) collectPeerMetrics(loads []float64, interval uint64) { // TODO: use unified metrics. 
(keep backward compatibility at the same time) for _, k := range f.kind.RegionStats() { switch k { - case RegionReadBytes: + case utils.RegionReadBytes: readByteHist.Observe(loads[int(k)]) - case RegionReadKeys: + case utils.RegionReadKeys: readKeyHist.Observe(loads[int(k)]) - case RegionWriteBytes: + case utils.RegionWriteBytes: writeByteHist.Observe(loads[int(k)]) - case RegionWriteKeys: + case utils.RegionWriteKeys: writeKeyHist.Observe(loads[int(k)]) - case RegionWriteQueryNum: + case utils.RegionWriteQueryNum: writeQueryHist.Observe(loads[int(k)]) - case RegionReadQueryNum: + case utils.RegionReadQueryNum: readQueryHist.Observe(loads[int(k)]) } } @@ -178,7 +162,7 @@ func (f *hotPeerCache) collectExpiredItems(region *core.RegionInfo) []*HotPeerSt if region.GetStorePeer(storeID) == nil { item := f.getOldHotPeerStat(regionID, storeID) if item != nil { - item.actionType = Remove + item.actionType = utils.Remove items = append(items, item) } } @@ -202,12 +186,12 @@ func (f *hotPeerCache) checkPeerFlow(peer *core.PeerInfo, region *core.RegionInf oldItem := f.getOldHotPeerStat(regionID, storeID) // check whether the peer is allowed to be inherited - source := direct + source := utils.Direct if oldItem == nil { for _, storeID := range f.getAllStoreIDs(region) { oldItem = f.getOldHotPeerStat(regionID, storeID) if oldItem != nil && oldItem.allowInherited { - source = inherit + source = utils.Inherit break } } @@ -231,7 +215,7 @@ func (f *hotPeerCache) checkPeerFlow(peer *core.PeerInfo, region *core.RegionInf RegionID: regionID, Loads: f.kind.GetLoadRatesFromPeer(peer), isLeader: region.GetLeader().GetStoreId() == storeID, - actionType: Update, + actionType: utils.Update, stores: make([]uint64, len(peers)), } for i, peer := range peers { @@ -272,13 +256,13 @@ func (f *hotPeerCache) checkColdPeer(storeID uint64, reportRegions map[uint64]*c // use 0 to make the cold newItem won't affect the loads. 
Loads: make([]float64, len(oldItem.Loads)), isLeader: oldItem.isLeader, - actionType: Update, + actionType: utils.Update, inCold: true, stores: oldItem.stores, } - deltaLoads := make([]float64, RegionStatCount) + deltaLoads := make([]float64, utils.RegionStatCount) thresholds := f.calcHotThresholds(storeID) - source := direct + source := utils.Direct for i, loads := range thresholds { deltaLoads[i] = loads * float64(interval) } @@ -293,10 +277,10 @@ func (f *hotPeerCache) checkColdPeer(storeID uint64, reportRegions map[uint64]*c func (f *hotPeerCache) collectMetrics() { for _, thresholds := range f.thresholdsOfStore { - thresholds.metrics[ByteDim].Set(thresholds.rates[ByteDim]) - thresholds.metrics[KeyDim].Set(thresholds.rates[KeyDim]) - thresholds.metrics[QueryDim].Set(thresholds.rates[QueryDim]) - thresholds.metrics[DimLen].Set(float64(thresholds.topNLen)) + thresholds.metrics[utils.ByteDim].Set(thresholds.rates[utils.ByteDim]) + thresholds.metrics[utils.KeyDim].Set(thresholds.rates[utils.KeyDim]) + thresholds.metrics[utils.QueryDim].Set(thresholds.rates[utils.QueryDim]) + thresholds.metrics[utils.DimLen].Set(float64(thresholds.topNLen)) } } @@ -320,12 +304,12 @@ func (f *hotPeerCache) calcHotThresholds(storeID uint64) []float64 { store := storeTag(storeID) kind := f.kind.String() t = &thresholds{ - rates: make([]float64, DimLen), - metrics: [DimLen + 1]prometheus.Gauge{ - ByteDim: hotCacheStatusGauge.WithLabelValues("byte-rate-threshold", store, kind), - KeyDim: hotCacheStatusGauge.WithLabelValues("key-rate-threshold", store, kind), - QueryDim: hotCacheStatusGauge.WithLabelValues("query-rate-threshold", store, kind), - DimLen: hotCacheStatusGauge.WithLabelValues("total_length", store, kind), + rates: make([]float64, utils.DimLen), + metrics: [utils.DimLen + 1]prometheus.Gauge{ + utils.ByteDim: hotCacheStatusGauge.WithLabelValues("byte-rate-threshold", store, kind), + utils.KeyDim: hotCacheStatusGauge.WithLabelValues("key-rate-threshold", store, kind), + utils.QueryDim: hotCacheStatusGauge.WithLabelValues("query-rate-threshold", store, kind), + utils.DimLen: hotCacheStatusGauge.WithLabelValues("total_length", store, kind), }, } } @@ -334,7 +318,7 @@ func (f *hotPeerCache) calcHotThresholds(storeID uint64) []float64 { t.updatedTime = time.Now() statKinds := f.kind.RegionStats() for dim, kind := range statKinds { - t.rates[dim] = MinHotThresholds[kind] + t.rates[dim] = utils.MinHotThresholds[kind] } if tn, ok := f.peersOfStore[storeID]; ok { t.topNLen = tn.Len() @@ -447,10 +431,10 @@ func (f *hotPeerCache) getHotPeerStat(regionID, storeID uint64) *HotPeerStat { return nil } -func (f *hotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldItem *HotPeerStat, deltaLoads []float64, interval time.Duration, source sourceKind) *HotPeerStat { +func (f *hotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldItem *HotPeerStat, deltaLoads []float64, interval time.Duration, source utils.SourceKind) *HotPeerStat { regionStats := f.kind.RegionStats() - if source == inherit { + if source == utils.Inherit { for _, dim := range oldItem.rollingLoads { newItem.rollingLoads = append(newItem.rollingLoads, dim.Clone()) } @@ -466,7 +450,7 @@ func (f *hotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldIt // maintain anticount and hotdegree to avoid store threshold and hot peer are unstable. // For write stat, as the stat is send by region heartbeat, the first heartbeat will be skipped. 
// For read stat, as the stat is send by store heartbeat, the first heartbeat won't be skipped. - if f.kind == Write { + if f.kind == utils.Write { f.inheritItem(newItem, oldItem) return newItem } @@ -493,7 +477,7 @@ func (f *hotPeerCache) updateHotPeerStat(region *core.RegionInfo, newItem, oldIt if newItem.isHot(thresholds) { f.initItem(newItem) } else { - newItem.actionType = Remove + newItem.actionType = utils.Remove } } else { if newItem.isHot(thresholds) { @@ -514,7 +498,7 @@ func (f *hotPeerCache) updateNewHotPeerStat(newItem *HotPeerStat, deltaLoads []f if interval.Seconds() >= float64(f.kind.ReportInterval()) { f.initItem(newItem) } - newItem.actionType = Add + newItem.actionType = utils.Add newItem.rollingLoads = make([]*dimStat, len(regionStats)) for i, k := range regionStats { ds := newDimStat(f.interval()) @@ -530,7 +514,7 @@ func (f *hotPeerCache) updateNewHotPeerStat(newItem *HotPeerStat, deltaLoads []f func (f *hotPeerCache) putItem(item *HotPeerStat) { peers, ok := f.peersOfStore[item.StoreID] if !ok { - peers = NewTopN(DimLen, TopNN, f.topNTTL) + peers = utils.NewTopN(utils.DimLen, TopNN, f.topNTTL) f.peersOfStore[item.StoreID] = peers } peers.Put(item) @@ -564,7 +548,7 @@ func (f *hotPeerCache) coldItem(newItem, oldItem *HotPeerStat) { newItem.HotDegree = oldItem.HotDegree - 1 newItem.AntiCount = oldItem.AntiCount - 1 if newItem.AntiCount <= 0 { - newItem.actionType = Remove + newItem.actionType = utils.Remove } else { newItem.allowInherited = true } @@ -594,3 +578,7 @@ func (f *hotPeerCache) inheritItem(newItem, oldItem *HotPeerStat) { func (f *hotPeerCache) interval() time.Duration { return time.Duration(f.kind.ReportInterval()) * time.Second } + +func storeTag(id uint64) string { + return fmt.Sprintf("store-%d", id) +} diff --git a/pkg/statistics/hot_peer_cache_test.go b/pkg/statistics/hot_peer_cache_test.go index 5e0a7f64141..36f922d3830 100644 --- a/pkg/statistics/hot_peer_cache_test.go +++ b/pkg/statistics/hot_peer_cache_test.go @@ -27,15 +27,16 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/movingaverage" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/typeutil" ) func TestStoreTimeUnsync(t *testing.T) { re := require.New(t) - cache := NewHotPeerCache(context.Background(), Write) + cache := NewHotPeerCache(context.Background(), utils.Write) intervals := []uint64{120, 60} for _, interval := range intervals { - region := buildRegion(Write, 3, interval) + region := buildRegion(utils.Write, 3, interval) checkAndUpdate(re, cache, region, 3) { stats := cache.RegionStats(0) @@ -57,35 +58,35 @@ const ( ) type testCacheCase struct { - kind RWType + kind utils.RWType operator operator expect int - actionType ActionType + actionType utils.ActionType } func TestCache(t *testing.T) { re := require.New(t) tests := []*testCacheCase{ - {Read, transferLeader, 3, Update}, - {Read, movePeer, 4, Remove}, - {Read, addReplica, 4, Update}, - {Write, transferLeader, 3, Remove}, - {Write, movePeer, 4, Remove}, - {Write, addReplica, 4, Remove}, + {utils.Read, transferLeader, 3, utils.Update}, + {utils.Read, movePeer, 4, utils.Remove}, + {utils.Read, addReplica, 4, utils.Update}, + {utils.Write, transferLeader, 3, utils.Remove}, + {utils.Write, movePeer, 4, utils.Remove}, + {utils.Write, addReplica, 4, utils.Remove}, } for _, test := range tests { - defaultSize := map[RWType]int{ - Read: 3, // all peers - Write: 3, // all peers + defaultSize := map[utils.RWType]int{ + utils.Read: 3, // all peers + utils.Write: 
3, // all peers } cache := NewHotPeerCache(context.Background(), test.kind) region := buildRegion(test.kind, 3, 60) checkAndUpdate(re, cache, region, defaultSize[test.kind]) - checkHit(re, cache, region, test.kind, Add) // all peers are new + checkHit(re, cache, region, test.kind, utils.Add) // all peers are new srcStore, region := schedule(re, test.operator, region, 10) res := checkAndUpdate(re, cache, region, test.expect) - checkHit(re, cache, region, test.kind, Update) // hit cache + checkHit(re, cache, region, test.kind, utils.Update) // hit cache if test.expect != defaultSize[test.kind] { checkOp(re, res, srcStore, test.actionType) } @@ -153,9 +154,9 @@ func checkAndUpdateSkipOne(re *require.Assertions, cache *hotPeerCache, region * return updateFlow(cache, res) } -func checkHit(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, kind RWType, actionType ActionType) { +func checkHit(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, kind utils.RWType, actionType utils.ActionType) { var peers []*metapb.Peer - if kind == Read { + if kind == utils.Read { peers = []*metapb.Peer{region.GetLeader()} } else { peers = region.GetPeers() @@ -167,7 +168,7 @@ func checkHit(re *require.Assertions, cache *hotPeerCache, region *core.RegionIn } } -func checkOp(re *require.Assertions, ret []*HotPeerStat, storeID uint64, actionType ActionType) { +func checkOp(re *require.Assertions, ret []*HotPeerStat, storeID uint64, actionType utils.ActionType) { for _, item := range ret { if item.StoreID == storeID { re.Equal(actionType, item.actionType) @@ -192,13 +193,13 @@ func checkIntervalSum(cache *hotPeerCache, region *core.RegionInfo) bool { // checkIntervalSumContinuous checks whether the interval sum of the peer is continuous. func checkIntervalSumContinuous(re *require.Assertions, intervalSums map[uint64]int, rets []*HotPeerStat, interval uint64) { for _, ret := range rets { - if ret.actionType == Remove { + if ret.actionType == utils.Remove { delete(intervalSums, ret.StoreID) continue } new := int(ret.getIntervalSum() / 1000000000) if old, ok := intervalSums[ret.StoreID]; ok { - re.Equal((old+int(interval))%RegionHeartBeatReportInterval, new) + re.Equal((old+int(interval))%utils.RegionHeartBeatReportInterval, new) } intervalSums[ret.StoreID] = new } @@ -251,7 +252,7 @@ func pickFollower(region *core.RegionInfo) (index int, peer *metapb.Peer) { return dst, meta.Peers[dst] } -func buildRegion(kind RWType, peerCount int, interval uint64) *core.RegionInfo { +func buildRegion(kind utils.RWType, peerCount int, interval uint64) *core.RegionInfo { peers := newPeers(peerCount, func(i int) uint64 { return uint64(10000 + i) }, func(i int) uint64 { return uint64(i) }) @@ -266,7 +267,7 @@ func buildRegion(kind RWType, peerCount int, interval uint64) *core.RegionInfo { leader := meta.Peers[rand.Intn(3)] switch kind { - case Read: + case utils.Read: return core.NewRegionInfo( meta, leader, @@ -275,7 +276,7 @@ func buildRegion(kind RWType, peerCount int, interval uint64) *core.RegionInfo { core.SetReadKeys(10*units.MiB*interval), core.SetReadQuery(1024*interval), ) - case Write: + case utils.Write: return core.NewRegionInfo( meta, leader, @@ -305,12 +306,12 @@ func newPeers(n int, pid genID, sid genID) []*metapb.Peer { func TestUpdateHotPeerStat(t *testing.T) { re := require.New(t) - cache := NewHotPeerCache(context.Background(), Read) + cache := NewHotPeerCache(context.Background(), utils.Read) storeID, regionID := uint64(1), uint64(2) peer := &metapb.Peer{StoreId: storeID} region := 
core.NewRegionInfo(&metapb.Region{Id: regionID, Peers: []*metapb.Peer{peer}}, peer) // we statistic read peer info from store heartbeat rather than region heartbeat - m := RegionHeartBeatReportInterval / StoreHeartBeatReportInterval + m := utils.RegionHeartBeatReportInterval / utils.StoreHeartBeatReportInterval ThresholdsUpdateInterval = 0 defer func() { ThresholdsUpdateInterval = 8 * time.Second @@ -319,9 +320,9 @@ func TestUpdateHotPeerStat(t *testing.T) { // skip interval=0 interval := 0 deltaLoads := []float64{0.0, 0.0, 0.0} - MinHotThresholds[RegionReadBytes] = 0.0 - MinHotThresholds[RegionReadKeys] = 0.0 - MinHotThresholds[RegionReadQueryNum] = 0.0 + utils.MinHotThresholds[utils.RegionReadBytes] = 0.0 + utils.MinHotThresholds[utils.RegionReadKeys] = 0.0 + utils.MinHotThresholds[utils.RegionReadQueryNum] = 0.0 newItem := cache.checkPeerFlow(core.NewPeerInfo(peer, deltaLoads, uint64(interval)), region) re.Nil(newItem) @@ -329,18 +330,18 @@ func TestUpdateHotPeerStat(t *testing.T) { // new peer, interval is larger than report interval, but no hot interval = 10 deltaLoads = []float64{0.0, 0.0, 0.0} - MinHotThresholds[RegionReadBytes] = 1.0 - MinHotThresholds[RegionReadKeys] = 1.0 - MinHotThresholds[RegionReadQueryNum] = 1.0 + utils.MinHotThresholds[utils.RegionReadBytes] = 1.0 + utils.MinHotThresholds[utils.RegionReadKeys] = 1.0 + utils.MinHotThresholds[utils.RegionReadQueryNum] = 1.0 newItem = cache.checkPeerFlow(core.NewPeerInfo(peer, deltaLoads, uint64(interval)), region) re.Nil(newItem) // new peer, interval is less than report interval interval = 4 deltaLoads = []float64{60.0, 60.0, 60.0} - MinHotThresholds[RegionReadBytes] = 0.0 - MinHotThresholds[RegionReadKeys] = 0.0 - MinHotThresholds[RegionReadQueryNum] = 0.0 + utils.MinHotThresholds[utils.RegionReadBytes] = 0.0 + utils.MinHotThresholds[utils.RegionReadKeys] = 0.0 + utils.MinHotThresholds[utils.RegionReadQueryNum] = 0.0 newItem = cache.checkPeerFlow(core.NewPeerInfo(peer, deltaLoads, uint64(interval)), region) re.NotNil(newItem) re.Equal(0, newItem.HotDegree) @@ -353,7 +354,7 @@ func TestUpdateHotPeerStat(t *testing.T) { re.Equal(0, newItem.HotDegree) re.Equal(0, newItem.AntiCount) // sum of interval is larger than report interval, and hot - newItem.AntiCount = Read.DefaultAntiCount() + newItem.AntiCount = utils.Read.DefaultAntiCount() cache.updateStat(newItem) newItem = cache.checkPeerFlow(core.NewPeerInfo(peer, deltaLoads, uint64(interval)), region) re.Equal(1, newItem.HotDegree) @@ -370,9 +371,9 @@ func TestUpdateHotPeerStat(t *testing.T) { re.Equal(2, newItem.HotDegree) re.Equal(2*m, newItem.AntiCount) // sum of interval is larger than report interval, and cold - MinHotThresholds[RegionReadBytes] = 10.0 - MinHotThresholds[RegionReadKeys] = 10.0 - MinHotThresholds[RegionReadQueryNum] = 10.0 + utils.MinHotThresholds[utils.RegionReadBytes] = 10.0 + utils.MinHotThresholds[utils.RegionReadKeys] = 10.0 + utils.MinHotThresholds[utils.RegionReadQueryNum] = 10.0 cache.updateStat(newItem) newItem = cache.checkPeerFlow(core.NewPeerInfo(peer, deltaLoads, uint64(interval)), region) re.Equal(1, newItem.HotDegree) @@ -384,12 +385,12 @@ func TestUpdateHotPeerStat(t *testing.T) { } re.Less(newItem.HotDegree, 0) re.Equal(0, newItem.AntiCount) - re.Equal(Remove, newItem.actionType) + re.Equal(utils.Remove, newItem.actionType) } func TestThresholdWithUpdateHotPeerStat(t *testing.T) { re := require.New(t) - byteRate := MinHotThresholds[RegionReadBytes] * 2 + byteRate := utils.MinHotThresholds[utils.RegionReadBytes] * 2 expectThreshold := 
byteRate * HotThresholdRatio testMetrics(re, 120., byteRate, expectThreshold) testMetrics(re, 60., byteRate, expectThreshold) @@ -399,9 +400,9 @@ func TestThresholdWithUpdateHotPeerStat(t *testing.T) { } func testMetrics(re *require.Assertions, interval, byteRate, expectThreshold float64) { - cache := NewHotPeerCache(context.Background(), Read) + cache := NewHotPeerCache(context.Background(), utils.Read) storeID := uint64(1) - re.GreaterOrEqual(byteRate, MinHotThresholds[RegionReadBytes]) + re.GreaterOrEqual(byteRate, utils.MinHotThresholds[utils.RegionReadBytes]) ThresholdsUpdateInterval = 0 defer func() { ThresholdsUpdateInterval = 8 * time.Second @@ -414,28 +415,28 @@ func testMetrics(re *require.Assertions, interval, byteRate, expectThreshold flo newItem := &HotPeerStat{ StoreID: storeID, RegionID: i, - actionType: Update, - Loads: make([]float64, DimLen), + actionType: utils.Update, + Loads: make([]float64, utils.DimLen), } - newItem.Loads[ByteDim] = byteRate - newItem.Loads[KeyDim] = 0 + newItem.Loads[utils.ByteDim] = byteRate + newItem.Loads[utils.KeyDim] = 0 oldItem = cache.getOldHotPeerStat(i, storeID) - if oldItem != nil && oldItem.rollingLoads[ByteDim].isHot(thresholds[ByteDim]) == true { + if oldItem != nil && oldItem.rollingLoads[utils.ByteDim].isHot(thresholds[utils.ByteDim]) == true { break } loads := []float64{byteRate * interval, 0.0, 0.0} if oldItem == nil { item = cache.updateNewHotPeerStat(newItem, loads, time.Duration(interval)*time.Second) } else { - item = cache.updateHotPeerStat(nil, newItem, oldItem, loads, time.Duration(interval)*time.Second, direct) + item = cache.updateHotPeerStat(nil, newItem, oldItem, loads, time.Duration(interval)*time.Second, utils.Direct) } cache.updateStat(item) } thresholds := cache.calcHotThresholds(storeID) if i < TopNN { - re.Equal(MinHotThresholds[RegionReadBytes], thresholds[ByteDim]) + re.Equal(utils.MinHotThresholds[utils.RegionReadBytes], thresholds[utils.ByteDim]) } else { - re.Equal(expectThreshold, thresholds[ByteDim]) + re.Equal(expectThreshold, thresholds[utils.ByteDim]) } } } @@ -446,8 +447,8 @@ func TestRemoveFromCache(t *testing.T) { interval := uint64(5) checkers := []check{checkAndUpdate, checkAndUpdateWithOrdering} for _, checker := range checkers { - cache := NewHotPeerCache(context.Background(), Write) - region := buildRegion(Write, peerCount, interval) + cache := NewHotPeerCache(context.Background(), utils.Write) + region := buildRegion(utils.Write, peerCount, interval) // prepare intervalSums := make(map[uint64]int) for i := 1; i <= 200; i++ { @@ -481,8 +482,8 @@ func TestRemoveFromCacheRandom(t *testing.T) { for _, peerCount := range peerCounts { for _, interval := range intervals { for _, checker := range checkers { - cache := NewHotPeerCache(context.Background(), Write) - region := buildRegion(Write, peerCount, interval) + cache := NewHotPeerCache(context.Background(), utils.Write) + region := buildRegion(utils.Write, peerCount, interval) target := uint64(10) intervalSums := make(map[uint64]int) @@ -506,7 +507,7 @@ func TestRemoveFromCacheRandom(t *testing.T) { break } } - if interval < RegionHeartBeatReportInterval { + if interval < utils.RegionHeartBeatReportInterval { re.True(checkIntervalSum(cache, region)) } re.Len(cache.storesOfRegion[region.GetID()], peerCount) @@ -535,8 +536,8 @@ func checkCoolDown(re *require.Assertions, cache *hotPeerCache, region *core.Reg func TestCoolDownTransferLeader(t *testing.T) { re := require.New(t) - cache := NewHotPeerCache(context.Background(), Read) - region := 
buildRegion(Read, 3, 60) + cache := NewHotPeerCache(context.Background(), utils.Read) + region := buildRegion(utils.Read, 3, 60) moveLeader := func() { _, region = schedule(re, movePeer, region, 10) @@ -568,8 +569,8 @@ func TestCoolDownTransferLeader(t *testing.T) { } testCases := []func(){moveLeader, transferLeader, movePeer, addReplica, removeReplica} for _, testCase := range testCases { - cache = NewHotPeerCache(context.Background(), Read) - region = buildRegion(Read, 3, 60) + cache = NewHotPeerCache(context.Background(), utils.Read) + region = buildRegion(utils.Read, 3, 60) for i := 1; i <= 200; i++ { checkAndUpdate(re, cache, region) } @@ -581,8 +582,8 @@ func TestCoolDownTransferLeader(t *testing.T) { // See issue #4510 func TestCacheInherit(t *testing.T) { re := require.New(t) - cache := NewHotPeerCache(context.Background(), Read) - region := buildRegion(Read, 3, 10) + cache := NewHotPeerCache(context.Background(), utils.Read) + region := buildRegion(utils.Read, 3, 10) // prepare for i := 1; i <= 200; i++ { checkAndUpdate(re, cache, region) @@ -594,9 +595,9 @@ func TestCacheInherit(t *testing.T) { newStoreID, region = schedule(re, removeReplica, region) rets := checkAndUpdate(re, cache, region) for _, ret := range rets { - if ret.actionType != Remove { - flow := ret.Loads[ByteDim] - re.Equal(float64(region.GetBytesRead()/ReadReportInterval), flow) + if ret.actionType != utils.Remove { + flow := ret.Loads[utils.ByteDim] + re.Equal(float64(region.GetBytesRead()/utils.StoreHeartBeatReportInterval), flow) } } // new flow @@ -611,9 +612,9 @@ func TestCacheInherit(t *testing.T) { _, region = schedule(re, removeReplica, region) rets = checkAndUpdate(re, cache, region) for _, ret := range rets { - if ret.actionType != Remove { - flow := ret.Loads[ByteDim] - re.Equal(float64(newFlow/ReadReportInterval), flow) + if ret.actionType != utils.Remove { + flow := ret.Loads[utils.ByteDim] + re.Equal(float64(newFlow/utils.StoreHeartBeatReportInterval), flow) } } } @@ -625,7 +626,7 @@ type testMovingAverageCase struct { func checkMovingAverage(re *require.Assertions, testCase *testMovingAverageCase) { interval := time.Second - tm := movingaverage.NewTimeMedian(DefaultAotSize, DefaultWriteMfSize, interval) + tm := movingaverage.NewTimeMedian(utils.DefaultAotSize, utils.DefaultWriteMfSize, interval) var results []float64 for _, data := range testCase.report { tm.Add(data, interval) @@ -672,7 +673,7 @@ func TestHotPeerCacheTopNThreshold(t *testing.T) { re := require.New(t) testWithUpdateInterval := func(interval time.Duration) { ThresholdsUpdateInterval = interval - cache := NewHotPeerCache(context.Background(), Write) + cache := NewHotPeerCache(context.Background(), utils.Write) now := time.Now() for id := uint64(0); id < 100; id++ { meta := &metapb.Region{ @@ -695,17 +696,17 @@ func TestHotPeerCacheTopNThreshold(t *testing.T) { } if ThresholdsUpdateInterval == 0 { if id < 60 { - re.Equal(MinHotThresholds[RegionWriteKeys], cache.calcHotThresholds(1)[KeyDim]) // num 0, EmptyRegion: region.GetApproximateSize() <= core.EmptyRegionApproximateSize, OversizedRegion: region.IsOversized( - int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxSize()), - int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxKeys()), + int64(r.conf.GetRegionMaxSize()), + int64(r.conf.GetRegionMaxKeys()), ), UndersizedRegion: region.NeedMerge( int64(r.conf.GetMaxMergeRegionSize()), diff --git a/pkg/statistics/region_collection_test.go b/pkg/statistics/region_collection_test.go index f767c30fcd4..bc15c648598 100644 --- 
a/pkg/statistics/region_collection_test.go +++ b/pkg/statistics/region_collection_test.go @@ -65,7 +65,7 @@ func TestRegionStatistics(t *testing.T) { r2 := &metapb.Region{Id: 2, Peers: peers[0:2], StartKey: []byte("cc"), EndKey: []byte("dd")} region1 := core.NewRegionInfo(r1, peers[0]) region2 := core.NewRegionInfo(r2, peers[0]) - regionStats := NewRegionStatistics(nil, opt, manager, nil) + regionStats := NewRegionStatistics(nil, opt, manager) regionStats.Observe(region1, stores) re.Len(regionStats.stats[ExtraPeer], 1) re.Len(regionStats.stats[LearnerPeer], 1) @@ -150,7 +150,7 @@ func TestRegionStatisticsWithPlacementRule(t *testing.T) { region3 := core.NewRegionInfo(r3, peers[0]) region4 := core.NewRegionInfo(r4, peers[0]) region5 := core.NewRegionInfo(r5, peers[4]) - regionStats := NewRegionStatistics(nil, opt, manager, nil) + regionStats := NewRegionStatistics(nil, opt, manager) // r2 didn't match the rules regionStats.Observe(region2, stores) re.Len(regionStats.stats[MissPeer], 1) diff --git a/pkg/statistics/region_stat_informer.go b/pkg/statistics/region_stat_informer.go index 8b5ba536cb7..4fec5b4aacf 100644 --- a/pkg/statistics/region_stat_informer.go +++ b/pkg/statistics/region_stat_informer.go @@ -14,11 +14,14 @@ package statistics -import "github.com/tikv/pd/pkg/core" +import ( + "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/statistics/utils" +) // RegionStatInformer provides access to a shared informer of statistics. type RegionStatInformer interface { - GetHotPeerStat(rw RWType, regionID, storeID uint64) *HotPeerStat + GetHotPeerStat(rw utils.RWType, regionID, storeID uint64) *HotPeerStat IsRegionHot(region *core.RegionInfo) bool // RegionWriteStats return the storeID -> write stat of peers on this store. // The result only includes peers that are hot enough. diff --git a/pkg/statistics/store.go b/pkg/statistics/store.go index d8288a9b9cf..baeef0ad417 100644 --- a/pkg/statistics/store.go +++ b/pkg/statistics/store.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/movingaverage" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/syncutil" "go.uber.org/zap" ) @@ -107,7 +108,7 @@ func (s *StoresStats) GetStoresLoads() map[uint64][]float64 { defer s.RUnlock() res := make(map[uint64][]float64, len(s.rollingStoresStats)) for storeID, stats := range s.rollingStoresStats { - for i := StoreStatKind(0); i < StoreStatCount; i++ { + for i := utils.StoreStatKind(0); i < utils.StoreStatCount; i++ { res[storeID] = append(res[storeID], stats.GetLoad(i)) } } @@ -140,25 +141,25 @@ type RollingStoreStats struct { // NewRollingStoreStats creates a RollingStoreStats. 
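Editor's note: after the move of the statistics enums into pkg/statistics/utils, per-store load vectors are built by ranging over the typed StoreStatKind values, as GetStoresLoads does above. A minimal standalone sketch of that pattern follows; the kind names are illustrative stand-ins, not the PD definitions.

    package main

    import "fmt"

    // StatKind mirrors the idea of a typed index into a per-store load vector.
    // The kind names below are illustrative, not the PD types.
    type StatKind int

    const (
    	ReadBytes StatKind = iota
    	ReadKeys
    	WriteBytes
    	WriteKeys
    	StatCount // sentinel: number of kinds
    )

    // loadsOf assembles a load vector by ranging over every kind, the same
    // shape GetStoresLoads uses with utils.StoreStatKind(0)..utils.StoreStatCount.
    func loadsOf(get func(StatKind) float64) []float64 {
    	res := make([]float64, 0, StatCount)
    	for k := StatKind(0); k < StatCount; k++ {
    		res = append(res, get(k))
    	}
    	return res
    }

    func main() {
    	fake := func(k StatKind) float64 { return float64(k) * 1.5 }
    	fmt.Println(loadsOf(fake)) // [0 1.5 3 4.5]
    }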
func newRollingStoreStats() *RollingStoreStats { - timeMedians := make([]*movingaverage.TimeMedian, StoreStatCount) - movingAvgs := make([]movingaverage.MovingAvg, StoreStatCount) + timeMedians := make([]*movingaverage.TimeMedian, utils.StoreStatCount) + movingAvgs := make([]movingaverage.MovingAvg, utils.StoreStatCount) // from StoreHeartbeat - interval := StoreHeartBeatReportInterval * time.Second - timeMedians[StoreReadBytes] = movingaverage.NewTimeMedian(DefaultAotSize, DefaultReadMfSize, interval) - timeMedians[StoreReadKeys] = movingaverage.NewTimeMedian(DefaultAotSize, DefaultReadMfSize, interval) - timeMedians[StoreReadQuery] = movingaverage.NewTimeMedian(DefaultAotSize, DefaultReadMfSize, interval) - timeMedians[StoreWriteBytes] = movingaverage.NewTimeMedian(DefaultAotSize, DefaultWriteMfSize, interval) - timeMedians[StoreWriteKeys] = movingaverage.NewTimeMedian(DefaultAotSize, DefaultWriteMfSize, interval) - timeMedians[StoreWriteQuery] = movingaverage.NewTimeMedian(DefaultAotSize, DefaultWriteMfSize, interval) - movingAvgs[StoreCPUUsage] = movingaverage.NewMedianFilter(storeStatsRollingWindowsSize) - movingAvgs[StoreDiskReadRate] = movingaverage.NewMedianFilter(storeStatsRollingWindowsSize) - movingAvgs[StoreDiskWriteRate] = movingaverage.NewMedianFilter(storeStatsRollingWindowsSize) + interval := utils.StoreHeartBeatReportInterval * time.Second + timeMedians[utils.StoreReadBytes] = movingaverage.NewTimeMedian(utils.DefaultAotSize, utils.DefaultReadMfSize, interval) + timeMedians[utils.StoreReadKeys] = movingaverage.NewTimeMedian(utils.DefaultAotSize, utils.DefaultReadMfSize, interval) + timeMedians[utils.StoreReadQuery] = movingaverage.NewTimeMedian(utils.DefaultAotSize, utils.DefaultReadMfSize, interval) + timeMedians[utils.StoreWriteBytes] = movingaverage.NewTimeMedian(utils.DefaultAotSize, utils.DefaultWriteMfSize, interval) + timeMedians[utils.StoreWriteKeys] = movingaverage.NewTimeMedian(utils.DefaultAotSize, utils.DefaultWriteMfSize, interval) + timeMedians[utils.StoreWriteQuery] = movingaverage.NewTimeMedian(utils.DefaultAotSize, utils.DefaultWriteMfSize, interval) + movingAvgs[utils.StoreCPUUsage] = movingaverage.NewMedianFilter(storeStatsRollingWindowsSize) + movingAvgs[utils.StoreDiskReadRate] = movingaverage.NewMedianFilter(storeStatsRollingWindowsSize) + movingAvgs[utils.StoreDiskWriteRate] = movingaverage.NewMedianFilter(storeStatsRollingWindowsSize) // from RegionHeartbeat // The data from regionStats is used in TiFlash, so higher tolerance is required - movingAvgs[StoreRegionsWriteBytes] = movingaverage.NewMedianFilter(RegionsStatsRollingWindowsSize) - movingAvgs[StoreRegionsWriteKeys] = movingaverage.NewMedianFilter(RegionsStatsRollingWindowsSize) + movingAvgs[utils.StoreRegionsWriteBytes] = movingaverage.NewMedianFilter(RegionsStatsRollingWindowsSize) + movingAvgs[utils.StoreRegionsWriteKeys] = movingaverage.NewMedianFilter(RegionsStatsRollingWindowsSize) return &RollingStoreStats{ timeMedians: timeMedians, @@ -190,25 +191,25 @@ func (r *RollingStoreStats) Observe(stats *pdpb.StoreStats) { r.Lock() defer r.Unlock() readQueryNum, writeQueryNum := core.GetReadQueryNum(stats.QueryStats), core.GetWriteQueryNum(stats.QueryStats) - r.timeMedians[StoreWriteBytes].Add(float64(stats.BytesWritten), interval) - r.timeMedians[StoreWriteKeys].Add(float64(stats.KeysWritten), interval) - r.timeMedians[StoreWriteQuery].Add(float64(writeQueryNum), interval) - r.timeMedians[StoreReadBytes].Add(float64(stats.BytesRead), interval) - 
r.timeMedians[StoreReadKeys].Add(float64(stats.KeysRead), interval) - r.timeMedians[StoreReadQuery].Add(float64(readQueryNum), interval) + r.timeMedians[utils.StoreWriteBytes].Add(float64(stats.BytesWritten), interval) + r.timeMedians[utils.StoreWriteKeys].Add(float64(stats.KeysWritten), interval) + r.timeMedians[utils.StoreWriteQuery].Add(float64(writeQueryNum), interval) + r.timeMedians[utils.StoreReadBytes].Add(float64(stats.BytesRead), interval) + r.timeMedians[utils.StoreReadKeys].Add(float64(stats.KeysRead), interval) + r.timeMedians[utils.StoreReadQuery].Add(float64(readQueryNum), interval) // Updates the cpu usages and disk rw rates of store. - r.movingAvgs[StoreCPUUsage].Add(collect(stats.GetCpuUsages())) - r.movingAvgs[StoreDiskReadRate].Add(collect(stats.GetReadIoRates())) - r.movingAvgs[StoreDiskWriteRate].Add(collect(stats.GetWriteIoRates())) + r.movingAvgs[utils.StoreCPUUsage].Add(collect(stats.GetCpuUsages())) + r.movingAvgs[utils.StoreDiskReadRate].Add(collect(stats.GetReadIoRates())) + r.movingAvgs[utils.StoreDiskWriteRate].Add(collect(stats.GetWriteIoRates())) } // ObserveRegionsStats records current statistics from region stats. func (r *RollingStoreStats) ObserveRegionsStats(writeBytesRate, writeKeysRate float64) { r.Lock() defer r.Unlock() - r.movingAvgs[StoreRegionsWriteBytes].Add(writeBytesRate) - r.movingAvgs[StoreRegionsWriteKeys].Add(writeKeysRate) + r.movingAvgs[utils.StoreRegionsWriteBytes].Add(writeBytesRate) + r.movingAvgs[utils.StoreRegionsWriteKeys].Add(writeKeysRate) } // Set sets the statistics (for test). @@ -221,46 +222,46 @@ func (r *RollingStoreStats) Set(stats *pdpb.StoreStats) { r.Lock() defer r.Unlock() readQueryNum, writeQueryNum := core.GetReadQueryNum(stats.QueryStats), core.GetWriteQueryNum(stats.QueryStats) - r.timeMedians[StoreWriteBytes].Set(float64(stats.BytesWritten) / interval) - r.timeMedians[StoreReadBytes].Set(float64(stats.BytesRead) / interval) - r.timeMedians[StoreWriteKeys].Set(float64(stats.KeysWritten) / interval) - r.timeMedians[StoreReadKeys].Set(float64(stats.KeysRead) / interval) - r.timeMedians[StoreReadQuery].Set(float64(readQueryNum) / interval) - r.timeMedians[StoreWriteQuery].Set(float64(writeQueryNum) / interval) - r.movingAvgs[StoreCPUUsage].Set(collect(stats.GetCpuUsages())) - r.movingAvgs[StoreDiskReadRate].Set(collect(stats.GetReadIoRates())) - r.movingAvgs[StoreDiskWriteRate].Set(collect(stats.GetWriteIoRates())) + r.timeMedians[utils.StoreWriteBytes].Set(float64(stats.BytesWritten) / interval) + r.timeMedians[utils.StoreReadBytes].Set(float64(stats.BytesRead) / interval) + r.timeMedians[utils.StoreWriteKeys].Set(float64(stats.KeysWritten) / interval) + r.timeMedians[utils.StoreReadKeys].Set(float64(stats.KeysRead) / interval) + r.timeMedians[utils.StoreReadQuery].Set(float64(readQueryNum) / interval) + r.timeMedians[utils.StoreWriteQuery].Set(float64(writeQueryNum) / interval) + r.movingAvgs[utils.StoreCPUUsage].Set(collect(stats.GetCpuUsages())) + r.movingAvgs[utils.StoreDiskReadRate].Set(collect(stats.GetReadIoRates())) + r.movingAvgs[utils.StoreDiskWriteRate].Set(collect(stats.GetWriteIoRates())) } // SetRegionsStats sets the statistics from region stats (for test). 
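Editor's note: the rolling store stats above feed heartbeat counters into time-median estimators for flow rates and into fixed-size median filters for CPU and disk rates. The sketch below is a simplified stand-in for a fixed-window median filter to illustrate the smoothing idea; it is not the implementation in pkg/movingaverage.

    package main

    import (
    	"fmt"
    	"sort"
    )

    // medianFilter keeps the last `size` samples in a ring buffer and reports
    // their median, so a single spiky sample does not dominate the reported load.
    type medianFilter struct {
    	window []float64
    	size   int
    	next   int
    	full   bool
    }

    func newMedianFilter(size int) *medianFilter {
    	return &medianFilter{window: make([]float64, size), size: size}
    }

    func (m *medianFilter) Add(v float64) {
    	m.window[m.next] = v
    	m.next = (m.next + 1) % m.size
    	if m.next == 0 {
    		m.full = true
    	}
    }

    func (m *medianFilter) Get() float64 {
    	n := m.size
    	if !m.full {
    		n = m.next
    	}
    	if n == 0 {
    		return 0
    	}
    	tmp := append([]float64(nil), m.window[:n]...)
    	sort.Float64s(tmp)
    	return tmp[n/2]
    }

    func main() {
    	f := newMedianFilter(5)
    	for _, v := range []float64{10, 12, 300, 11, 13} { // 300 is a spike
    		f.Add(v)
    	}
    	fmt.Println(f.Get()) // 12 — the spike is filtered out
    }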
func (r *RollingStoreStats) SetRegionsStats(writeBytesRate, writeKeysRate float64) { r.Lock() defer r.Unlock() - r.movingAvgs[StoreRegionsWriteBytes].Set(writeBytesRate) - r.movingAvgs[StoreRegionsWriteKeys].Set(writeKeysRate) + r.movingAvgs[utils.StoreRegionsWriteBytes].Set(writeBytesRate) + r.movingAvgs[utils.StoreRegionsWriteKeys].Set(writeKeysRate) } // GetLoad returns store's load. -func (r *RollingStoreStats) GetLoad(k StoreStatKind) float64 { +func (r *RollingStoreStats) GetLoad(k utils.StoreStatKind) float64 { r.RLock() defer r.RUnlock() switch k { - case StoreReadBytes, StoreReadKeys, StoreReadQuery, StoreWriteBytes, StoreWriteKeys, StoreWriteQuery: + case utils.StoreReadBytes, utils.StoreReadKeys, utils.StoreReadQuery, utils.StoreWriteBytes, utils.StoreWriteKeys, utils.StoreWriteQuery: return r.timeMedians[k].Get() - case StoreCPUUsage, StoreDiskReadRate, StoreDiskWriteRate, StoreRegionsWriteBytes, StoreRegionsWriteKeys: + case utils.StoreCPUUsage, utils.StoreDiskReadRate, utils.StoreDiskWriteRate, utils.StoreRegionsWriteBytes, utils.StoreRegionsWriteKeys: return r.movingAvgs[k].Get() } return 0 } // GetInstantLoad returns store's instant load. -func (r *RollingStoreStats) GetInstantLoad(k StoreStatKind) float64 { +func (r *RollingStoreStats) GetInstantLoad(k utils.StoreStatKind) float64 { r.RLock() defer r.RUnlock() switch k { - case StoreReadBytes, StoreReadKeys, StoreReadQuery, StoreWriteBytes, StoreWriteKeys, StoreWriteQuery: + case utils.StoreReadBytes, utils.StoreReadKeys, utils.StoreReadQuery, utils.StoreWriteBytes, utils.StoreWriteKeys, utils.StoreWriteQuery: return r.timeMedians[k].GetInstantaneous() - case StoreCPUUsage, StoreDiskReadRate, StoreDiskWriteRate, StoreRegionsWriteBytes, StoreRegionsWriteKeys: + case utils.StoreCPUUsage, utils.StoreDiskReadRate, utils.StoreDiskWriteRate, utils.StoreRegionsWriteBytes, utils.StoreRegionsWriteKeys: return r.movingAvgs[k].GetInstantaneous() } return 0 diff --git a/pkg/statistics/store_collection.go b/pkg/statistics/store_collection.go index c362041ca00..2b695ed2923 100644 --- a/pkg/statistics/store_collection.go +++ b/pkg/statistics/store_collection.go @@ -22,6 +22,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/core/storelimit" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/server/config" ) @@ -32,7 +33,6 @@ const ( type storeStatistics struct { opt *config.PersistOptions - storeConfig *config.StoreConfig Up int Disconnect int Unhealthy int @@ -54,10 +54,9 @@ type storeStatistics struct { Removed int } -func newStoreStatistics(opt *config.PersistOptions, storeConfig *config.StoreConfig) *storeStatistics { +func newStoreStatistics(opt *config.PersistOptions) *storeStatistics { return &storeStatistics{ opt: opt, - storeConfig: storeConfig, LabelCounter: make(map[string]int), } } @@ -154,26 +153,26 @@ func (s *storeStatistics) Observe(store *core.StoreInfo, stats *StoresStats) { return } - storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_rate_bytes").Set(storeFlowStats.GetLoad(StoreWriteBytes)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_rate_bytes").Set(storeFlowStats.GetLoad(StoreReadBytes)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_rate_keys").Set(storeFlowStats.GetLoad(StoreWriteKeys)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_rate_keys").Set(storeFlowStats.GetLoad(StoreReadKeys)) - storeStatusGauge.WithLabelValues(storeAddress, id, 
"store_write_query_rate").Set(storeFlowStats.GetLoad(StoreWriteQuery)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_query_rate").Set(storeFlowStats.GetLoad(StoreReadQuery)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_cpu_usage").Set(storeFlowStats.GetLoad(StoreCPUUsage)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_disk_read_rate").Set(storeFlowStats.GetLoad(StoreDiskReadRate)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_disk_write_rate").Set(storeFlowStats.GetLoad(StoreDiskWriteRate)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_regions_write_rate_bytes").Set(storeFlowStats.GetLoad(StoreRegionsWriteBytes)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_regions_write_rate_keys").Set(storeFlowStats.GetLoad(StoreRegionsWriteKeys)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_rate_bytes").Set(storeFlowStats.GetLoad(utils.StoreWriteBytes)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_rate_bytes").Set(storeFlowStats.GetLoad(utils.StoreReadBytes)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_rate_keys").Set(storeFlowStats.GetLoad(utils.StoreWriteKeys)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_rate_keys").Set(storeFlowStats.GetLoad(utils.StoreReadKeys)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_query_rate").Set(storeFlowStats.GetLoad(utils.StoreWriteQuery)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_query_rate").Set(storeFlowStats.GetLoad(utils.StoreReadQuery)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_cpu_usage").Set(storeFlowStats.GetLoad(utils.StoreCPUUsage)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_disk_read_rate").Set(storeFlowStats.GetLoad(utils.StoreDiskReadRate)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_disk_write_rate").Set(storeFlowStats.GetLoad(utils.StoreDiskWriteRate)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_regions_write_rate_bytes").Set(storeFlowStats.GetLoad(utils.StoreRegionsWriteBytes)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_regions_write_rate_keys").Set(storeFlowStats.GetLoad(utils.StoreRegionsWriteKeys)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_rate_bytes_instant").Set(storeFlowStats.GetInstantLoad(StoreWriteBytes)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_rate_bytes_instant").Set(storeFlowStats.GetInstantLoad(StoreReadBytes)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_rate_keys_instant").Set(storeFlowStats.GetInstantLoad(StoreWriteKeys)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_rate_keys_instant").Set(storeFlowStats.GetInstantLoad(StoreReadKeys)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_query_rate_instant").Set(storeFlowStats.GetInstantLoad(StoreWriteQuery)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_query_rate_instant").Set(storeFlowStats.GetInstantLoad(StoreReadQuery)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_regions_write_rate_bytes_instant").Set(storeFlowStats.GetInstantLoad(StoreRegionsWriteBytes)) - storeStatusGauge.WithLabelValues(storeAddress, id, "store_regions_write_rate_keys_instant").Set(storeFlowStats.GetInstantLoad(StoreRegionsWriteKeys)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_rate_bytes_instant").Set(storeFlowStats.GetInstantLoad(utils.StoreWriteBytes)) + 
storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_rate_bytes_instant").Set(storeFlowStats.GetInstantLoad(utils.StoreReadBytes)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_rate_keys_instant").Set(storeFlowStats.GetInstantLoad(utils.StoreWriteKeys)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_rate_keys_instant").Set(storeFlowStats.GetInstantLoad(utils.StoreReadKeys)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_write_query_rate_instant").Set(storeFlowStats.GetInstantLoad(utils.StoreWriteQuery)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_read_query_rate_instant").Set(storeFlowStats.GetInstantLoad(utils.StoreReadQuery)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_regions_write_rate_bytes_instant").Set(storeFlowStats.GetInstantLoad(utils.StoreRegionsWriteBytes)) + storeStatusGauge.WithLabelValues(storeAddress, id, "store_regions_write_rate_keys_instant").Set(storeFlowStats.GetInstantLoad(utils.StoreRegionsWriteKeys)) } func (s *storeStatistics) Collect() { @@ -219,10 +218,11 @@ func (s *storeStatistics) Collect() { configs["max-snapshot-count"] = float64(s.opt.GetMaxSnapshotCount()) configs["max-merge-region-size"] = float64(s.opt.GetMaxMergeRegionSize()) configs["max-merge-region-keys"] = float64(s.opt.GetMaxMergeRegionKeys()) - configs["region-max-size"] = float64(s.storeConfig.GetRegionMaxSize()) - configs["region-split-size"] = float64(s.storeConfig.GetRegionSplitSize()) - configs["region-split-keys"] = float64(s.storeConfig.GetRegionSplitKeys()) - configs["region-max-keys"] = float64(s.storeConfig.GetRegionMaxKeys()) + storeConfig := s.opt.GetStoreConfig() + configs["region-max-size"] = float64(storeConfig.GetRegionMaxSize()) + configs["region-split-size"] = float64(storeConfig.GetRegionSplitSize()) + configs["region-split-keys"] = float64(storeConfig.GetRegionSplitKeys()) + configs["region-max-keys"] = float64(storeConfig.GetRegionMaxKeys()) var enableMakeUpReplica, enableRemoveDownReplica, enableRemoveExtraReplica, enableReplaceOfflineReplica float64 if s.opt.IsMakeUpReplicaEnabled() { @@ -291,10 +291,10 @@ type storeStatisticsMap struct { } // NewStoreStatisticsMap creates a new storeStatisticsMap. 
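Editor's note: the Observe path exports every load kind through a labelled Prometheus gauge keyed by (address, store, type). A minimal client_golang sketch of that pattern is below; the metric and label names are placeholders, not PD's registered names.

    package main

    import (
    	"github.com/prometheus/client_golang/prometheus"
    )

    // storeStatus is a GaugeVec keyed by (address, store, type), mirroring the
    // storeStatusGauge.WithLabelValues(...).Set(...) calls above.
    var storeStatus = prometheus.NewGaugeVec(
    	prometheus.GaugeOpts{
    		Namespace: "example",
    		Subsystem: "scheduler",
    		Name:      "store_status",
    		Help:      "Per-store load statistics.",
    	},
    	[]string{"address", "store", "type"},
    )

    func main() {
    	prometheus.MustRegister(storeStatus)

    	// One Set call per load kind, exactly the shape used in Observe.
    	loads := map[string]float64{
    		"store_write_rate_bytes": 1024,
    		"store_read_rate_bytes":  2048,
    		"store_cpu_usage":        35.5,
    	}
    	for typ, v := range loads {
    		storeStatus.WithLabelValues("127.0.0.1:20160", "1", typ).Set(v)
    	}
    }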
-func NewStoreStatisticsMap(opt *config.PersistOptions, storeConfig *config.StoreConfig) *storeStatisticsMap { +func NewStoreStatisticsMap(opt *config.PersistOptions) *storeStatisticsMap { return &storeStatisticsMap{ opt: opt, - stats: newStoreStatistics(opt, storeConfig), + stats: newStoreStatistics(opt), } } diff --git a/pkg/statistics/store_collection_test.go b/pkg/statistics/store_collection_test.go index 76a0dbd1b32..229339cb4c4 100644 --- a/pkg/statistics/store_collection_test.go +++ b/pkg/statistics/store_collection_test.go @@ -26,6 +26,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/mock/mockconfig" + "github.com/tikv/pd/pkg/statistics/utils" ) func TestStoreStatistics(t *testing.T) { @@ -64,7 +65,7 @@ func TestStoreStatistics(t *testing.T) { UsedSize: 0, })) stores[5] = store5 - storeStats := NewStoreStatisticsMap(opt, nil) + storeStats := NewStoreStatisticsMap(opt) for _, store := range stores { storeStats.Observe(store, storesStats) } @@ -93,10 +94,10 @@ func TestStoreStatistics(t *testing.T) { func TestSummaryStoreInfos(t *testing.T) { re := require.New(t) - rw := Read + rw := utils.Read kind := constant.LeaderKind collector := newTikvCollector() - storeHistoryLoad := NewStoreHistoryLoads(DimLen) + storeHistoryLoad := NewStoreHistoryLoads(utils.DimLen) storeInfos := make(map[uint64]*StoreSummaryInfo) storeLoads := make(map[uint64][]float64) for _, storeID := range []int{1, 3} { diff --git a/pkg/statistics/store_hot_peers_infos.go b/pkg/statistics/store_hot_peers_infos.go index 9480c0ed879..59ee3c20b6f 100644 --- a/pkg/statistics/store_hot_peers_infos.go +++ b/pkg/statistics/store_hot_peers_infos.go @@ -20,6 +20,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" + "github.com/tikv/pd/pkg/statistics/utils" ) // StoreHotPeersInfos is used to get human-readable description for hot regions. @@ -35,7 +36,7 @@ type StoreHotPeersStat map[uint64]*HotPeersStat // CollectHotPeerInfos only returns TotalBytesRate,TotalKeysRate,TotalQueryRate,Count func CollectHotPeerInfos(stores []*core.StoreInfo, regionStats map[uint64][]*HotPeerStat) *StoreHotPeersInfos { - peerLoadSum := make([]float64, DimLen) + peerLoadSum := make([]float64, utils.DimLen) collect := func(kind constant.ResourceKind) StoreHotPeersStat { ret := make(StoreHotPeersStat, len(stores)) for _, store := range stores { @@ -54,9 +55,9 @@ func CollectHotPeerInfos(stores []*core.StoreInfo, regionStats map[uint64][]*Hot } } ret[id] = &HotPeersStat{ - TotalBytesRate: peerLoadSum[ByteDim], - TotalKeysRate: peerLoadSum[KeyDim], - TotalQueryRate: peerLoadSum[QueryDim], + TotalBytesRate: peerLoadSum[utils.ByteDim], + TotalKeysRate: peerLoadSum[utils.KeyDim], + TotalQueryRate: peerLoadSum[utils.QueryDim], Count: len(peers), } } @@ -70,7 +71,7 @@ func CollectHotPeerInfos(stores []*core.StoreInfo, regionStats map[uint64][]*Hot // GetHotStatus returns the hot status for a given type. // NOTE: This function is exported by HTTP API. It does not contain `isLearner` and `LastUpdateTime` field. If need, please call `updateRegionInfo`. 
-func GetHotStatus(stores []*core.StoreInfo, storesLoads map[uint64][]float64, regionStats map[uint64][]*HotPeerStat, typ RWType, isTraceRegionFlow bool) *StoreHotPeersInfos { +func GetHotStatus(stores []*core.StoreInfo, storesLoads map[uint64][]float64, regionStats map[uint64][]*HotPeerStat, typ utils.RWType, isTraceRegionFlow bool) *StoreHotPeersInfos { stInfos := SummaryStoreInfos(stores) stLoadInfosAsLeader := SummaryStoresLoad( stInfos, @@ -110,7 +111,7 @@ func SummaryStoresLoad( storesHistoryLoads *StoreHistoryLoads, storeHotPeers map[uint64][]*HotPeerStat, isTraceRegionFlow bool, - rwTy RWType, + rwTy utils.RWType, kind constant.ResourceKind, ) map[uint64]*StoreLoadDetail { // loadDetail stores the storeID -> hotPeers stat and its current and future stat(rate,count) @@ -144,13 +145,13 @@ func summaryStoresLoadByEngine( storesLoads map[uint64][]float64, storesHistoryLoads *StoreHistoryLoads, storeHotPeers map[uint64][]*HotPeerStat, - rwTy RWType, + rwTy utils.RWType, kind constant.ResourceKind, collector storeCollector, ) []*StoreLoadDetail { loadDetail := make([]*StoreLoadDetail, 0, len(storeInfos)) - allStoreLoadSum := make([]float64, DimLen) - allStoreHistoryLoadSum := make([][]float64, DimLen) + allStoreLoadSum := make([]float64, utils.DimLen) + allStoreHistoryLoadSum := make([][]float64, utils.DimLen) allStoreCount := 0 allHotPeersCount := 0 @@ -164,7 +165,7 @@ func summaryStoresLoadByEngine( // Find all hot peers first var hotPeers []*HotPeerStat - peerLoadSum := make([]float64, DimLen) + peerLoadSum := make([]float64, utils.DimLen) // TODO: To remove `filterHotPeers`, we need to: // HotLeaders consider `Write{Bytes,Keys}`, so when we schedule `writeLeader`, all peers are leader. for _, peer := range filterHotPeers(kind, storeHotPeers[id]) { @@ -177,11 +178,11 @@ func summaryStoresLoadByEngine( // Metric for debug. // TODO: pre-allocate gauge metrics ty := "byte-rate-" + rwTy.String() + "-" + kind.String() - hotPeerSummary.WithLabelValues(ty, fmt.Sprintf("%v", id)).Set(peerLoadSum[ByteDim]) + hotPeerSummary.WithLabelValues(ty, fmt.Sprintf("%v", id)).Set(peerLoadSum[utils.ByteDim]) ty = "key-rate-" + rwTy.String() + "-" + kind.String() - hotPeerSummary.WithLabelValues(ty, fmt.Sprintf("%v", id)).Set(peerLoadSum[KeyDim]) + hotPeerSummary.WithLabelValues(ty, fmt.Sprintf("%v", id)).Set(peerLoadSum[utils.KeyDim]) ty = "query-rate-" + rwTy.String() + "-" + kind.String() - hotPeerSummary.WithLabelValues(ty, fmt.Sprintf("%v", id)).Set(peerLoadSum[QueryDim]) + hotPeerSummary.WithLabelValues(ty, fmt.Sprintf("%v", id)).Set(peerLoadSum[utils.QueryDim]) } loads := collector.GetLoads(storeLoads, peerLoadSum, rwTy, kind) @@ -231,7 +232,7 @@ func summaryStoresLoadByEngine( } // todo: remove some the max value or min value to avoid the effect of extreme value. - expectHistoryLoads := make([][]float64, DimLen) + expectHistoryLoads := make([][]float64, utils.DimLen) for i := range allStoreHistoryLoadSum { expectHistoryLoads[i] = make([]float64, len(allStoreHistoryLoadSum[i])) for j := range allStoreHistoryLoadSum[i] { @@ -254,19 +255,19 @@ func summaryStoresLoadByEngine( // Metric for debug. 
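Editor's note: summaryStoresLoadByEngine accumulates per-dimension sums over hot peers and stores, then derives expected loads from those sums before exporting the debug metrics below. A tiny sketch of that arithmetic follows; it shows the expectation as a per-store mean, which should be treated as an assumption about the exact formula.

    package main

    import "fmt"

    const dimLen = 3 // byte, key, query — the same layout indexed with utils.ByteDim etc.

    // expectation sums each dimension across stores and divides by the store
    // count, the core of the "expect" loads computed per engine.
    func expectation(perStore [][]float64) []float64 {
    	sum := make([]float64, dimLen)
    	for _, loads := range perStore {
    		for d := 0; d < dimLen; d++ {
    			sum[d] += loads[d]
    		}
    	}
    	exp := make([]float64, dimLen)
    	if n := float64(len(perStore)); n > 0 {
    		for d := 0; d < dimLen; d++ {
    			exp[d] = sum[d] / n
    		}
    	}
    	return exp
    }

    func main() {
    	fmt.Println(expectation([][]float64{{100, 10, 1}, {300, 30, 3}})) // [200 20 2]
    }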
engine := collector.Engine() ty := "exp-byte-rate-" + rwTy.String() + "-" + kind.String() - hotPeerSummary.WithLabelValues(ty, engine).Set(expectLoads[ByteDim]) + hotPeerSummary.WithLabelValues(ty, engine).Set(expectLoads[utils.ByteDim]) ty = "exp-key-rate-" + rwTy.String() + "-" + kind.String() - hotPeerSummary.WithLabelValues(ty, engine).Set(expectLoads[KeyDim]) + hotPeerSummary.WithLabelValues(ty, engine).Set(expectLoads[utils.KeyDim]) ty = "exp-query-rate-" + rwTy.String() + "-" + kind.String() - hotPeerSummary.WithLabelValues(ty, engine).Set(expectLoads[QueryDim]) + hotPeerSummary.WithLabelValues(ty, engine).Set(expectLoads[utils.QueryDim]) ty = "exp-count-rate-" + rwTy.String() + "-" + kind.String() hotPeerSummary.WithLabelValues(ty, engine).Set(expectCount) ty = "stddev-byte-rate-" + rwTy.String() + "-" + kind.String() - hotPeerSummary.WithLabelValues(ty, engine).Set(stddevLoads[ByteDim]) + hotPeerSummary.WithLabelValues(ty, engine).Set(stddevLoads[utils.ByteDim]) ty = "stddev-key-rate-" + rwTy.String() + "-" + kind.String() - hotPeerSummary.WithLabelValues(ty, engine).Set(stddevLoads[KeyDim]) + hotPeerSummary.WithLabelValues(ty, engine).Set(stddevLoads[utils.KeyDim]) ty = "stddev-query-rate-" + rwTy.String() + "-" + kind.String() - hotPeerSummary.WithLabelValues(ty, engine).Set(stddevLoads[QueryDim]) + hotPeerSummary.WithLabelValues(ty, engine).Set(stddevLoads[utils.QueryDim]) } expect := StoreLoad{ Loads: expectLoads, diff --git a/pkg/statistics/store_load.go b/pkg/statistics/store_load.go index d110ec4a7f3..79417b65b7e 100644 --- a/pkg/statistics/store_load.go +++ b/pkg/statistics/store_load.go @@ -20,6 +20,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/constant" + "github.com/tikv/pd/pkg/statistics/utils" ) // StoreLoadDetail records store load information. @@ -31,8 +32,8 @@ type StoreLoadDetail struct { // ToHotPeersStat abstracts load information to HotPeersStat. func (li *StoreLoadDetail) ToHotPeersStat() *HotPeersStat { - storeByteRate, storeKeyRate, storeQueryRate := li.LoadPred.Current.Loads[ByteDim], - li.LoadPred.Current.Loads[KeyDim], li.LoadPred.Current.Loads[QueryDim] + storeByteRate, storeKeyRate, storeQueryRate := li.LoadPred.Current.Loads[utils.ByteDim], + li.LoadPred.Current.Loads[utils.KeyDim], li.LoadPred.Current.Loads[utils.QueryDim] if len(li.HotPeers) == 0 { return &HotPeersStat{ StoreByteRate: storeByteRate, @@ -50,9 +51,9 @@ func (li *StoreLoadDetail) ToHotPeersStat() *HotPeersStat { for _, peer := range li.HotPeers { if peer.HotDegree > 0 { peers = append(peers, toHotPeerStatShow(peer)) - byteRate += peer.GetLoad(ByteDim) - keyRate += peer.GetLoad(KeyDim) - queryRate += peer.GetLoad(QueryDim) + byteRate += peer.GetLoad(utils.ByteDim) + keyRate += peer.GetLoad(utils.KeyDim) + queryRate += peer.GetLoad(utils.QueryDim) } } @@ -74,9 +75,9 @@ func (li *StoreLoadDetail) IsUniform(dim int, threshold float64) bool { } func toHotPeerStatShow(p *HotPeerStat) HotPeerStatShow { - byteRate := p.GetLoad(ByteDim) - keyRate := p.GetLoad(KeyDim) - queryRate := p.GetLoad(QueryDim) + byteRate := p.GetLoad(utils.ByteDim) + keyRate := p.GetLoad(utils.KeyDim) + queryRate := p.GetLoad(utils.QueryDim) return HotPeerStatShow{ StoreID: p.StoreID, Stores: p.GetStores(), @@ -152,21 +153,21 @@ type StoreLoad struct { } // ToLoadPred returns the current load and future predictive load. 
-func (load StoreLoad) ToLoadPred(rwTy RWType, infl *Influence) *StoreLoadPred { +func (load StoreLoad) ToLoadPred(rwTy utils.RWType, infl *Influence) *StoreLoadPred { future := StoreLoad{ Loads: append(load.Loads[:0:0], load.Loads...), Count: load.Count, } if infl != nil { switch rwTy { - case Read: - future.Loads[ByteDim] += infl.Loads[RegionReadBytes] - future.Loads[KeyDim] += infl.Loads[RegionReadKeys] - future.Loads[QueryDim] += infl.Loads[RegionReadQueryNum] - case Write: - future.Loads[ByteDim] += infl.Loads[RegionWriteBytes] - future.Loads[KeyDim] += infl.Loads[RegionWriteKeys] - future.Loads[QueryDim] += infl.Loads[RegionWriteQueryNum] + case utils.Read: + future.Loads[utils.ByteDim] += infl.Loads[utils.RegionReadBytes] + future.Loads[utils.KeyDim] += infl.Loads[utils.RegionReadKeys] + future.Loads[utils.QueryDim] += infl.Loads[utils.RegionReadQueryNum] + case utils.Write: + future.Loads[utils.ByteDim] += infl.Loads[utils.RegionWriteBytes] + future.Loads[utils.KeyDim] += infl.Loads[utils.RegionWriteKeys] + future.Loads[utils.QueryDim] += infl.Loads[utils.RegionWriteQueryNum] } future.Count += infl.Count } @@ -255,14 +256,14 @@ var ( // StoreHistoryLoads records the history load of a store. type StoreHistoryLoads struct { // loads[read/write][leader/follower]-->[store id]-->history load - loads [RWTypeLen][constant.ResourceKindLen]map[uint64]*storeHistoryLoad + loads [utils.RWTypeLen][constant.ResourceKindLen]map[uint64]*storeHistoryLoad dim int } // NewStoreHistoryLoads creates a StoreHistoryLoads. func NewStoreHistoryLoads(dim int) *StoreHistoryLoads { st := StoreHistoryLoads{dim: dim} - for i := RWType(0); i < RWTypeLen; i++ { + for i := utils.RWType(0); i < utils.RWTypeLen; i++ { for j := constant.ResourceKind(0); j < constant.ResourceKindLen; j++ { st.loads[i][j] = make(map[uint64]*storeHistoryLoad) } @@ -271,7 +272,7 @@ func NewStoreHistoryLoads(dim int) *StoreHistoryLoads { } // Add adds the store load to the history. -func (s *StoreHistoryLoads) Add(storeID uint64, rwTp RWType, kind constant.ResourceKind, loads []float64) { +func (s *StoreHistoryLoads) Add(storeID uint64, rwTp utils.RWType, kind constant.ResourceKind, loads []float64) { load, ok := s.loads[rwTp][kind][storeID] if !ok { size := defaultSize @@ -285,7 +286,7 @@ func (s *StoreHistoryLoads) Add(storeID uint64, rwTp RWType, kind constant.Resou } // Get returns the store loads from the history, not one time point. 
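Editor's note: ToLoadPred folds a pending Influence into a copy of the current loads, picking the read or write influence entries depending on the RWType. The standalone sketch below mirrors that shape; the field names are hypothetical, not the real StoreLoad/Influence definitions.

    package main

    import "fmt"

    type rwType int

    const (
    	read rwType = iota
    	write
    )

    type influence struct {
    	readBytes, readKeys, writeBytes, writeKeys float64
    }

    // predict copies the current loads and adds the influence of pending
    // operators: reads and writes pull from different influence entries.
    func predict(current []float64, rw rwType, infl *influence) []float64 {
    	future := append([]float64(nil), current...) // copy, never mutate the input
    	if infl == nil {
    		return future
    	}
    	switch rw {
    	case read:
    		future[0] += infl.readBytes
    		future[1] += infl.readKeys
    	case write:
    		future[0] += infl.writeBytes
    		future[1] += infl.writeKeys
    	}
    	return future
    }

    func main() {
    	fmt.Println(predict([]float64{100, 10}, write, &influence{writeBytes: 50, writeKeys: 5})) // [150 15]
    }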
-func (s *StoreHistoryLoads) Get(storeID uint64, rwTp RWType, kind constant.ResourceKind) [][]float64 { +func (s *StoreHistoryLoads) Get(storeID uint64, rwTp utils.RWType, kind constant.ResourceKind) [][]float64 { load, ok := s.loads[rwTp][kind][storeID] if !ok { return [][]float64{} diff --git a/pkg/statistics/store_load_test.go b/pkg/statistics/store_load_test.go index 18441f00dbc..67f2dff9cf9 100644 --- a/pkg/statistics/store_load_test.go +++ b/pkg/statistics/store_load_test.go @@ -19,27 +19,28 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core/constant" + "github.com/tikv/pd/pkg/statistics/utils" ) func TestHistoryLoads(t *testing.T) { re := require.New(t) historySampleInterval = 0 - historyLoads := NewStoreHistoryLoads(DimLen) + historyLoads := NewStoreHistoryLoads(utils.DimLen) loads := []float64{1.0, 2.0, 3.0} - rwTp := Read + rwTp := utils.Read kind := constant.LeaderKind historyLoads.Add(1, rwTp, kind, loads) re.Len(historyLoads.Get(1, rwTp, kind)[0], 10) - expectLoads := make([][]float64, DimLen) + expectLoads := make([][]float64, utils.DimLen) for i := 0; i < len(loads); i++ { expectLoads[i] = make([]float64, 10) } for i := 0; i < 10; i++ { historyLoads.Add(1, rwTp, kind, loads) - expectLoads[ByteDim][i] = 1.0 - expectLoads[KeyDim][i] = 2.0 - expectLoads[QueryDim][i] = 3.0 + expectLoads[utils.ByteDim][i] = 1.0 + expectLoads[utils.KeyDim][i] = 2.0 + expectLoads[utils.QueryDim][i] = 3.0 } re.EqualValues(expectLoads, historyLoads.Get(1, rwTp, kind)) } diff --git a/pkg/statistics/util.go b/pkg/statistics/utils/constant.go similarity index 54% rename from pkg/statistics/util.go rename to pkg/statistics/utils/constant.go index 8c3a89d9c8e..14bee23778a 100644 --- a/pkg/statistics/util.go +++ b/pkg/statistics/utils/constant.go @@ -1,4 +1,4 @@ -// Copyright 2018 TiKV Project Authors. +// Copyright 2023 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,17 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -package statistics +package utils import ( - "fmt" + "github.com/docker/go-units" ) const ( - // StoreHeartBeatReportInterval is the heartbeat report interval of a store. - StoreHeartBeatReportInterval = 10 - // RegionHeartBeatReportInterval is the heartbeat report interval of a region. + // RegionHeartBeatReportInterval indicates the interval between write interval, the value is the heartbeat report interval of a region. RegionHeartBeatReportInterval = 60 + // StoreHeartBeatReportInterval indicates the interval between read stats report, the value is the heartbeat report interval of a store. + StoreHeartBeatReportInterval = 10 + + // HotRegionAntiCount is default value for antiCount + HotRegionAntiCount = 2 + // DefaultAotSize is default size of average over time. DefaultAotSize = 1 // DefaultWriteMfSize is default size of write median filter. @@ -31,6 +35,12 @@ const ( DefaultReadMfSize = 5 ) -func storeTag(id uint64) string { - return fmt.Sprintf("store-%d", id) +// MinHotThresholds is the threshold at which this dimension is recorded as a hot spot. 
+var MinHotThresholds = [RegionStatCount]float64{ + RegionReadBytes: 8 * units.KiB, + RegionReadKeys: 128, + RegionReadQueryNum: 128, + RegionWriteBytes: 1 * units.KiB, + RegionWriteKeys: 32, + RegionWriteQueryNum: 32, } diff --git a/pkg/statistics/kind.go b/pkg/statistics/utils/kind.go similarity index 93% rename from pkg/statistics/kind.go rename to pkg/statistics/utils/kind.go index 6a942e3feac..4d44b8d57e1 100644 --- a/pkg/statistics/kind.go +++ b/pkg/statistics/utils/kind.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package statistics +package utils import ( "github.com/tikv/pd/pkg/core" @@ -145,19 +145,20 @@ func (k StoreStatKind) String() string { return "unknown StoreStatKind" } -// sourceKind represents the statistics item source. -type sourceKind int +// SourceKind represents the statistics item source. +type SourceKind int +// Different statistics item sources. const ( - direct sourceKind = iota // there is a corresponding peer in this store. - inherit // there is no corresponding peer in this store and we need to copy from other stores. + Direct SourceKind = iota // there is a corresponding peer in this store. + Inherit // there is no corresponding peer in this store and we need to copy from other stores. ) -func (k sourceKind) String() string { +func (k SourceKind) String() string { switch k { - case direct: + case Direct: return "direct" - case inherit: + case Inherit: return "inherit" } return "unknown" @@ -213,9 +214,9 @@ func (rw RWType) Inverse() RWType { func (rw RWType) ReportInterval() int { switch rw { case Write: - return WriteReportInterval + return RegionHeartBeatReportInterval default: // Case Read - return ReadReportInterval + return StoreHeartBeatReportInterval } } diff --git a/pkg/statistics/kind_test.go b/pkg/statistics/utils/kind_test.go similarity index 99% rename from pkg/statistics/kind_test.go rename to pkg/statistics/utils/kind_test.go index 9928c7851a4..0a02ffa00c1 100644 --- a/pkg/statistics/kind_test.go +++ b/pkg/statistics/utils/kind_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package statistics +package utils import ( "testing" diff --git a/pkg/statistics/topn.go b/pkg/statistics/utils/topn.go similarity index 99% rename from pkg/statistics/topn.go rename to pkg/statistics/utils/topn.go index f5b71db66d5..916bbb82f92 100644 --- a/pkg/statistics/topn.go +++ b/pkg/statistics/utils/topn.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package statistics +package utils import ( "container/heap" diff --git a/pkg/statistics/topn_test.go b/pkg/statistics/utils/topn_test.go similarity index 99% rename from pkg/statistics/topn_test.go rename to pkg/statistics/utils/topn_test.go index 6aac24103aa..f92d5a61f34 100644 --- a/pkg/statistics/topn_test.go +++ b/pkg/statistics/utils/topn_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
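Editor's note: the new utils/constant.go keeps MinHotThresholds as a per-kind lookup table (byte thresholds expressed with units.KiB from docker/go-units), and RWType.ReportInterval now maps writes to the 60s region heartbeat and reads to the 10s store heartbeat. The sketch below shows how a caller would consult such a table; the kind names are illustrative, the values follow the hunk above.

    package main

    import (
    	"fmt"

    	"github.com/docker/go-units"
    )

    // statKind is an illustrative stand-in for utils.RegionStatKind.
    type statKind int

    const (
    	readBytes statKind = iota
    	readKeys
    	writeBytes
    	writeKeys
    	statCount
    )

    // minThresholds mirrors the shape of utils.MinHotThresholds: a per-kind
    // floor below which a dimension is never reported as hot (8 KiB read bytes,
    // 1 KiB write bytes, 128/32 keys, as in the diff).
    var minThresholds = [statCount]float64{
    	readBytes:  8 * units.KiB,
    	readKeys:   128,
    	writeBytes: 1 * units.KiB,
    	writeKeys:  32,
    }

    func isHot(kind statKind, load float64) bool {
    	return load >= minThresholds[kind]
    }

    func main() {
    	fmt.Println(isHot(readBytes, 4*units.KiB)) // false
    	fmt.Println(isHot(writeKeys, 40))          // true
    }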
-package statistics +package utils import ( "math/rand" diff --git a/pkg/storage/endpoint/key_path.go b/pkg/storage/endpoint/key_path.go index 08aebb400d9..a463fc8acf6 100644 --- a/pkg/storage/endpoint/key_path.go +++ b/pkg/storage/endpoint/key_path.go @@ -92,6 +92,21 @@ func ConfigPath(clusterID uint64) string { return path.Join(PDRootPath(clusterID), configPath) } +// RulesPath returns the path to save the placement rules. +func RulesPath(clusterID uint64) string { + return path.Join(PDRootPath(clusterID), rulesPath) +} + +// RuleGroupPath returns the path to save the placement rule groups. +func RuleGroupPath(clusterID uint64) string { + return path.Join(PDRootPath(clusterID), ruleGroupPath) +} + +// RegionLabelPath returns the path to save the region label. +func RegionLabelPath(clusterID uint64) string { + return path.Join(PDRootPath(clusterID), regionLabelPath) +} + func scheduleConfigPath(scheduleName string) string { return path.Join(customScheduleConfigPath, scheduleName) } diff --git a/pkg/storage/endpoint/rule.go b/pkg/storage/endpoint/rule.go index 9edaef444e7..a57d16aeb2e 100644 --- a/pkg/storage/endpoint/rule.go +++ b/pkg/storage/endpoint/rule.go @@ -15,7 +15,6 @@ package endpoint import ( - "path" "strings" "go.etcd.io/etcd/clientv3" @@ -38,7 +37,7 @@ var _ RuleStorage = (*StorageEndpoint)(nil) // SaveRule stores a rule cfg to the rulesPath. func (se *StorageEndpoint) SaveRule(ruleKey string, rule interface{}) error { - return se.saveJSON(path.Join(rulesPath, ruleKey), rule) + return se.saveJSON(ruleKeyPath(ruleKey), rule) } // DeleteRule removes a rule from storage. @@ -53,7 +52,7 @@ func (se *StorageEndpoint) LoadRuleGroups(f func(k, v string)) error { // SaveRuleGroup stores a rule group config to storage. func (se *StorageEndpoint) SaveRuleGroup(groupID string, group interface{}) error { - return se.saveJSON(path.Join(ruleGroupPath, groupID), group) + return se.saveJSON(ruleGroupIDPath(groupID), group) } // DeleteRuleGroup removes a rule group from storage. @@ -68,7 +67,7 @@ func (se *StorageEndpoint) LoadRegionRules(f func(k, v string)) error { // SaveRegionRule saves a region rule to the storage. func (se *StorageEndpoint) SaveRegionRule(ruleKey string, rule interface{}) error { - return se.saveJSON(path.Join(regionLabelPath, ruleKey), rule) + return se.saveJSON(regionLabelKeyPath(ruleKey), rule) } // DeleteRegionRule removes a region rule from storage. diff --git a/pkg/tso/allocator_manager.go b/pkg/tso/allocator_manager.go index d7a8a9eb81d..6fb31004db0 100644 --- a/pkg/tso/allocator_manager.go +++ b/pkg/tso/allocator_manager.go @@ -19,6 +19,7 @@ import ( "fmt" "math" "path" + "runtime/trace" "strconv" "strings" "sync" @@ -1135,7 +1136,8 @@ func (am *AllocatorManager) deleteAllocatorGroup(dcLocation string) { } // HandleRequest forwards TSO allocation requests to correct TSO Allocators. -func (am *AllocatorManager) HandleRequest(dcLocation string, count uint32) (pdpb.Timestamp, error) { +func (am *AllocatorManager) HandleRequest(ctx context.Context, dcLocation string, count uint32) (pdpb.Timestamp, error) { + defer trace.StartRegion(ctx, "AllocatorManager.HandleRequest").End() if len(dcLocation) == 0 { dcLocation = GlobalDCLocation } @@ -1145,7 +1147,7 @@ func (am *AllocatorManager) HandleRequest(dcLocation string, count uint32) (pdpb return pdpb.Timestamp{}, err } - return allocatorGroup.allocator.GenerateTSO(count) + return allocatorGroup.allocator.GenerateTSO(ctx, count) } // ResetAllocatorGroup will reset the allocator's leadership and TSO initialized in memory. 
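Editor's note: key_path.go gains RulesPath/RuleGroupPath/RegionLabelPath so that rule storage no longer joins sub-paths inline at every call site. A minimal sketch of that path.Join helper style follows, using a hypothetical root instead of PDRootPath.

    package main

    import (
    	"fmt"
    	"path"
    )

    // rootPath is a hypothetical stand-in for PDRootPath(clusterID).
    func rootPath(clusterID uint64) string {
    	return path.Join("/pd", fmt.Sprintf("%d", clusterID))
    }

    // rulesPath mirrors the new helper style: the prefix is computed once in a
    // named function instead of being joined ad hoc wherever a rule is saved.
    func rulesPath(clusterID uint64) string {
    	return path.Join(rootPath(clusterID), "rules")
    }

    func ruleKeyPath(clusterID uint64, ruleKey string) string {
    	return path.Join(rulesPath(clusterID), ruleKey)
    }

    func main() {
    	fmt.Println(ruleKeyPath(7, "pd/default")) // /pd/7/rules/pd/default
    }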
diff --git a/pkg/tso/global_allocator.go b/pkg/tso/global_allocator.go index e81c05f9707..ed8136854fa 100644 --- a/pkg/tso/global_allocator.go +++ b/pkg/tso/global_allocator.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "runtime/trace" "sync" "sync/atomic" "time" @@ -61,7 +62,7 @@ type Allocator interface { SetTSO(tso uint64, ignoreSmaller, skipUpperBoundCheck bool) error // GenerateTSO is used to generate a given number of TSOs. // Make sure you have initialized the TSO allocator before calling. - GenerateTSO(count uint32) (pdpb.Timestamp, error) + GenerateTSO(ctx context.Context, count uint32) (pdpb.Timestamp, error) // Reset is used to reset the TSO allocator. Reset() } @@ -151,8 +152,8 @@ func (gta *GlobalTSOAllocator) GetTimestampPath() string { return gta.timestampOracle.GetTimestampPath() } -func (gta *GlobalTSOAllocator) estimateMaxTS(count uint32, suffixBits int) (*pdpb.Timestamp, bool, error) { - physical, logical, lastUpdateTime := gta.timestampOracle.generateTSO(int64(count), 0) +func (gta *GlobalTSOAllocator) estimateMaxTS(ctx context.Context, count uint32, suffixBits int) (*pdpb.Timestamp, bool, error) { + physical, logical, lastUpdateTime := gta.timestampOracle.generateTSO(ctx, int64(count), 0) if physical == 0 { return &pdpb.Timestamp{}, false, errs.ErrGenerateTimestamp.FastGenByArgs("timestamp in memory isn't initialized") } @@ -202,7 +203,8 @@ func (gta *GlobalTSOAllocator) SetTSO(tso uint64, ignoreSmaller, skipUpperBoundC // 1. Collect the max Local TSO from all Local TSO Allocator leaders and write it back to them as MaxTS. // 2. Estimate a MaxTS and try to write it to all Local TSO Allocator leaders directly to reduce the RTT. // During the process, if the estimated MaxTS is not accurate, it will fallback to the collecting way. -func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (pdpb.Timestamp, error) { +func (gta *GlobalTSOAllocator) GenerateTSO(ctx context.Context, count uint32) (pdpb.Timestamp, error) { + defer trace.StartRegion(ctx, "GlobalTSOAllocator.GenerateTSO").End() if !gta.member.GetLeadership().Check() { tsoCounter.WithLabelValues("not_leader", gta.timestampOracle.dcLocation).Inc() return pdpb.Timestamp{}, errs.ErrGenerateTimestamp.FastGenByArgs(fmt.Sprintf("requested pd %s of cluster", errs.NotLeaderErr)) @@ -212,8 +214,9 @@ func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (pdpb.Timestamp, error) // No dc-locations configured in the cluster, use the normal Global TSO generation way. // (without synchronization with other Local TSO Allocators) if len(dcLocationMap) == 0 { - return gta.timestampOracle.getTS(gta.member.GetLeadership(), count, 0) + return gta.timestampOracle.getTS(ctx, gta.member.GetLeadership(), count, 0) } + ctx1 := ctx // Have dc-locations configured in the cluster, use the Global TSO generation way. // (whit synchronization with other Local TSO Allocators) @@ -229,7 +232,7 @@ func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (pdpb.Timestamp, error) ) // TODO: add a switch to control whether to enable the MaxTSO estimation. // 1. Estimate a MaxTS among all Local TSO Allocator leaders according to the RTT. 
- estimatedMaxTSO, shouldRetry, err = gta.estimateMaxTS(count, suffixBits) + estimatedMaxTSO, shouldRetry, err = gta.estimateMaxTS(ctx1, count, suffixBits) if err != nil { log.Error("global tso allocator estimates MaxTS failed", logutil.CondUint32("keyspace-group-id", gta.getGroupID(), gta.getGroupID() > 0), @@ -271,7 +274,7 @@ func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (pdpb.Timestamp, error) } // 4. Persist MaxTS into memory, and etcd if needed var currentGlobalTSO *pdpb.Timestamp - if currentGlobalTSO, err = gta.getCurrentTSO(); err != nil { + if currentGlobalTSO, err = gta.getCurrentTSO(ctx1); err != nil { log.Error("global tso allocator gets the current global tso in memory failed", logutil.CondUint32("keyspace-group-id", gta.getGroupID(), gta.getGroupID() > 0), errs.ZapError(err)) @@ -280,7 +283,7 @@ func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (pdpb.Timestamp, error) if tsoutil.CompareTimestamp(currentGlobalTSO, &globalTSOResp) < 0 { tsoCounter.WithLabelValues("global_tso_persist", gta.timestampOracle.dcLocation).Inc() // Update the Global TSO in memory - if err = gta.timestampOracle.resetUserTimestamp(gta.member.GetLeadership(), tsoutil.GenerateTS(&globalTSOResp), true); err != nil { + if err = gta.timestampOracle.resetUserTimestamp(ctx1, gta.member.GetLeadership(), tsoutil.GenerateTS(&globalTSOResp), true); err != nil { tsoCounter.WithLabelValues("global_tso_persist_err", gta.timestampOracle.dcLocation).Inc() log.Error("global tso allocator update the global tso in memory failed", logutil.CondUint32("keyspace-group-id", gta.getGroupID(), gta.getGroupID() > 0), @@ -350,6 +353,7 @@ func (gta *GlobalTSOAllocator) SyncMaxTS( maxTSO *pdpb.Timestamp, skipCheck bool, ) error { + defer trace.StartRegion(ctx, "GlobalTSOAllocator.SyncMaxTS").End() originalMaxTSO := *maxTSO for i := 0; i < syncMaxRetryCount; i++ { // Collect all allocator leaders' client URLs @@ -501,7 +505,8 @@ func (gta *GlobalTSOAllocator) checkSyncedDCs(dcLocationMap map[string]DCLocatio return len(unsyncedDCs) == 0, unsyncedDCs } -func (gta *GlobalTSOAllocator) getCurrentTSO() (*pdpb.Timestamp, error) { +func (gta *GlobalTSOAllocator) getCurrentTSO(ctx context.Context) (*pdpb.Timestamp, error) { + defer trace.StartRegion(ctx, "GlobalTSOAllocator.getCurrentTSO").End() currentPhysical, currentLogical := gta.timestampOracle.getTSO() if currentPhysical == typeutil.ZeroTime { return &pdpb.Timestamp{}, errs.ErrGenerateTimestamp.FastGenByArgs("timestamp in memory isn't initialized") diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index 9dd06242a78..b4f513eaa60 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -994,7 +994,7 @@ func (kgm *KeyspaceGroupManager) HandleTSORequest( if err != nil { return pdpb.Timestamp{}, curKeyspaceGroupID, err } - ts, err = am.HandleRequest(dcLocation, count) + ts, err = am.HandleRequest(context.Background(), dcLocation, count) return ts, curKeyspaceGroupID, err } @@ -1033,7 +1033,7 @@ func (kgm *KeyspaceGroupManager) GetMinTS( if kgm.kgs[i] != nil && kgm.kgs[i].IsSplitTarget() { continue } - ts, err := am.HandleRequest(dcLocation, 1) + ts, err := am.HandleRequest(context.Background(), dcLocation, 1) if err != nil { return pdpb.Timestamp{}, kgAskedCount, kgTotalCount, err } @@ -1077,11 +1077,11 @@ func (kgm *KeyspaceGroupManager) checkTSOSplit( if err != nil { return err } - splitTargetTSO, err := splitTargetAllocator.GenerateTSO(1) + splitTargetTSO, err := 
splitTargetAllocator.GenerateTSO(context.Background(), 1) if err != nil { return err } - splitSourceTSO, err := splitSourceAllocator.GenerateTSO(1) + splitSourceTSO, err := splitSourceAllocator.GenerateTSO(context.Background(), 1) if err != nil { return err } diff --git a/pkg/tso/local_allocator.go b/pkg/tso/local_allocator.go index 9995d5cec3f..a2459673c9b 100644 --- a/pkg/tso/local_allocator.go +++ b/pkg/tso/local_allocator.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "path" + "runtime/trace" "sync/atomic" "time" @@ -122,13 +123,14 @@ func (lta *LocalTSOAllocator) SetTSO(tso uint64, ignoreSmaller, skipUpperBoundCh // GenerateTSO is used to generate a given number of TSOs. // Make sure you have initialized the TSO allocator before calling. -func (lta *LocalTSOAllocator) GenerateTSO(count uint32) (pdpb.Timestamp, error) { +func (lta *LocalTSOAllocator) GenerateTSO(ctx context.Context, count uint32) (pdpb.Timestamp, error) { + defer trace.StartRegion(ctx, "LocalTSOAllocator.GenerateTSO").End() if !lta.leadership.Check() { tsoCounter.WithLabelValues("not_leader", lta.timestampOracle.dcLocation).Inc() return pdpb.Timestamp{}, errs.ErrGenerateTimestamp.FastGenByArgs( fmt.Sprintf("requested pd %s of %s allocator", errs.NotLeaderErr, lta.timestampOracle.dcLocation)) } - return lta.timestampOracle.getTS(lta.leadership, count, lta.allocatorManager.GetSuffixBits()) + return lta.timestampOracle.getTS(ctx, lta.leadership, count, lta.allocatorManager.GetSuffixBits()) } // Reset is used to reset the TSO allocator. @@ -180,7 +182,7 @@ func (lta *LocalTSOAllocator) WriteTSO(maxTS *pdpb.Timestamp) error { if tsoutil.CompareTimestamp(currentTSO, maxTS) >= 0 { return nil } - return lta.timestampOracle.resetUserTimestamp(lta.leadership, tsoutil.GenerateTS(maxTS), true) + return lta.timestampOracle.resetUserTimestamp(context.Background(), lta.leadership, tsoutil.GenerateTS(maxTS), true) } // EnableAllocatorLeader sets the Local TSO Allocator itself to a leader. diff --git a/pkg/tso/tso.go b/pkg/tso/tso.go index 33da6e8d11f..15ce5ba4f9c 100644 --- a/pkg/tso/tso.go +++ b/pkg/tso/tso.go @@ -15,7 +15,9 @@ package tso import ( + "context" "fmt" + "runtime/trace" "sync/atomic" "time" @@ -103,7 +105,8 @@ func (t *timestampOracle) getTSO() (time.Time, int64) { } // generateTSO will add the TSO's logical part with the given count and returns the new TSO result. -func (t *timestampOracle) generateTSO(count int64, suffixBits int) (physical int64, logical int64, lastUpdateTime time.Time) { +func (t *timestampOracle) generateTSO(ctx context.Context, count int64, suffixBits int) (physical int64, logical int64, lastUpdateTime time.Time) { + defer trace.StartRegion(ctx, "timestampOracle.generateTSO").End() t.tsoMux.Lock() defer t.tsoMux.Unlock() if t.tsoMux.physical == typeutil.ZeroTime { @@ -201,7 +204,8 @@ func (t *timestampOracle) isInitialized() bool { // When ignoreSmaller is true, resetUserTimestamp will ignore the smaller tso resetting error and do nothing. // It's used to write MaxTS during the Global TSO synchronization without failing the writing as much as possible. 
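Editor's note: the TSO path now threads a context from HandleRequest down through GenerateTSO, getTS, and generateTSO, wrapping each hop in a runtime/trace region so a captured execution trace shows where the time goes. A minimal standalone sketch of that instrumentation pattern:

    package main

    import (
    	"context"
    	"log"
    	"os"
    	"runtime/trace"
    	"time"
    )

    // generate mimics the style added above: every layer opens a region on the
    // same context and closes it when the call returns.
    func generate(ctx context.Context) {
    	defer trace.StartRegion(ctx, "allocator.Generate").End()
    	inner(ctx)
    }

    func inner(ctx context.Context) {
    	defer trace.StartRegion(ctx, "oracle.generate").End()
    	time.Sleep(2 * time.Millisecond) // stand-in for the real work
    }

    func main() {
    	f, err := os.Create("trace.out")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	if err := trace.Start(f); err != nil {
    		log.Fatal(err)
    	}
    	generate(context.Background())
    	trace.Stop()
    	// Inspect the regions with: go tool trace trace.out
    }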
// cannot set timestamp to one which >= current + maxResetTSGap -func (t *timestampOracle) resetUserTimestamp(leadership *election.Leadership, tso uint64, ignoreSmaller bool) error { +func (t *timestampOracle) resetUserTimestamp(ctx context.Context, leadership *election.Leadership, tso uint64, ignoreSmaller bool) error { + defer trace.StartRegion(ctx, "timestampOracle.resetUserTimestamp").End() return t.resetUserTimestampInner(leadership, tso, ignoreSmaller, false) } @@ -336,7 +340,8 @@ func (t *timestampOracle) UpdateTimestamp(leadership *election.Leadership) error var maxRetryCount = 10 // getTS is used to get a timestamp. -func (t *timestampOracle) getTS(leadership *election.Leadership, count uint32, suffixBits int) (pdpb.Timestamp, error) { +func (t *timestampOracle) getTS(ctx context.Context, leadership *election.Leadership, count uint32, suffixBits int) (pdpb.Timestamp, error) { + defer trace.StartRegion(ctx, "timestampOracle.getTS").End() var resp pdpb.Timestamp if count == 0 { return resp, errs.ErrGenerateTimestamp.FastGenByArgs("tso count should be positive") @@ -353,7 +358,7 @@ func (t *timestampOracle) getTS(leadership *election.Leadership, count uint32, s return pdpb.Timestamp{}, errs.ErrGenerateTimestamp.FastGenByArgs("timestamp in memory isn't initialized") } // Get a new TSO result with the given count - resp.Physical, resp.Logical, _ = t.generateTSO(int64(count), suffixBits) + resp.Physical, resp.Logical, _ = t.generateTSO(ctx, int64(count), suffixBits) if resp.GetPhysical() == 0 { return pdpb.Timestamp{}, errs.ErrGenerateTimestamp.FastGenByArgs("timestamp in memory has been reset") } diff --git a/pkg/unsaferecovery/unsafe_recovery_controller.go b/pkg/unsaferecovery/unsafe_recovery_controller.go index b3716d145d1..675f76b91a9 100644 --- a/pkg/unsaferecovery/unsafe_recovery_controller.go +++ b/pkg/unsaferecovery/unsafe_recovery_controller.go @@ -31,9 +31,9 @@ import ( "github.com/tikv/pd/pkg/codec" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" + sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/syncutil" - "github.com/tikv/pd/server/config" "go.uber.org/zap" ) @@ -109,7 +109,7 @@ type cluster interface { DropCacheAllRegion() AllocID() (uint64, error) BuryStore(storeID uint64, forceBury bool) error - GetPersistOptions() *config.PersistOptions + GetSchedulerConfig() sc.SchedulerConfigProvider } // Controller is used to control the unsafe recovery process. @@ -136,6 +136,7 @@ type Controller struct { // exposed to the outside for testing AffectedTableIDs map[int64]struct{} affectedMetaRegions map[uint64]struct{} + newlyCreatedRegions map[uint64]struct{} err error } @@ -167,6 +168,7 @@ func (u *Controller) reset() { u.output = make([]StageOutput, 0) u.AffectedTableIDs = make(map[int64]struct{}, 0) u.affectedMetaRegions = make(map[uint64]struct{}, 0) + u.newlyCreatedRegions = make(map[uint64]struct{}, 0) u.err = nil } @@ -493,7 +495,7 @@ func (u *Controller) changeStage(stage stage) { u.stage = stage // Halt and resume the scheduling once the running state changed. 
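Editor's note: the unsafe-recovery controller now depends on the narrower sc.SchedulerConfigProvider instead of the concrete *config.PersistOptions, so only the halt/resume knobs it actually uses are visible to it. A sketch of that interface narrowing is below; the two method names come from the hunk, everything else is hypothetical.

    package main

    import "fmt"

    // schedulingConfig is the narrow view the controller needs; the methods
    // match the ones used in changeStage, everything else stays hidden.
    type schedulingConfig interface {
    	IsSchedulingHalted() bool
    	SetHaltScheduling(halt bool, source string)
    }

    // fakeConfig is a throwaway implementation for the example.
    type fakeConfig struct{ halted bool }

    func (f *fakeConfig) IsSchedulingHalted() bool { return f.halted }
    func (f *fakeConfig) SetHaltScheduling(halt bool, source string) {
    	f.halted = halt
    	fmt.Printf("scheduling halted=%v by %s\n", halt, source)
    }

    // syncHalt mirrors the changeStage logic: only flip the switch when the
    // desired state differs from the current one.
    func syncHalt(cfg schedulingConfig, running bool) {
    	if cfg.IsSchedulingHalted() != running {
    		cfg.SetHaltScheduling(running, "online-unsafe-recovery")
    	}
    }

    func main() {
    	cfg := &fakeConfig{}
    	syncHalt(cfg, true)  // halts
    	syncHalt(cfg, true)  // no-op, already halted
    	syncHalt(cfg, false) // resumes
    }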
running := isRunning(stage) - if opt := u.cluster.GetPersistOptions(); opt.IsSchedulingHalted() != running { + if opt := u.cluster.GetSchedulerConfig(); opt.IsSchedulingHalted() != running { opt.SetHaltScheduling(running, "online-unsafe-recovery") } @@ -666,6 +668,15 @@ func (u *Controller) getAffectedTableDigest() []string { } details = append(details, "affected table ids: "+strings.Trim(tables, ", ")) } + if len(u.newlyCreatedRegions) != 0 { + regions := "" + for r := range u.newlyCreatedRegions { + regions += fmt.Sprintf("%d, ", r) + } + details = append(details, "newly created empty regions: "+strings.Trim(regions, ", ")) + } else { + details = append(details, "no newly created empty regions") + } return details } @@ -1201,6 +1212,7 @@ func (u *Controller) generateCreateEmptyRegionPlan(newestRegionTree *regionTree, storeRecoveryPlan := u.getRecoveryPlan(storeID) storeRecoveryPlan.Creates = append(storeRecoveryPlan.Creates, newRegion) u.recordAffectedRegion(newRegion) + u.newlyCreatedRegions[newRegion.GetId()] = struct{}{} hasPlan = true } lastEnd = region.EndKey diff --git a/pkg/utils/etcdutil/etcdutil.go b/pkg/utils/etcdutil/etcdutil.go index ad4d3c4afe5..b59a9581996 100644 --- a/pkg/utils/etcdutil/etcdutil.go +++ b/pkg/utils/etcdutil/etcdutil.go @@ -270,7 +270,7 @@ func CreateEtcdClient(tlsConfig *tls.Config, acURLs []url.URL) (*clientv3.Client for { select { case <-client.Ctx().Done(): - log.Info("[etcd client] etcd client is closed, exit health check goroutine") + log.Info("etcd client is closed, exit health check goroutine") checker.Range(func(key, value interface{}) bool { client := value.(*healthyClient) client.Close() @@ -287,7 +287,7 @@ func CreateEtcdClient(tlsConfig *tls.Config, acURLs []url.URL) (*clientv3.Client // otherwise, the subconn will be retrying in grpc layer and use exponential backoff, // and it cannot recover as soon as possible. if time.Since(lastAvailable) > etcdServerDisconnectedTimeout { - log.Info("[etcd client] no available endpoint, try to reset endpoints", zap.Strings("last-endpoints", usedEps)) + log.Info("no available endpoint, try to reset endpoints", zap.Strings("last-endpoints", usedEps)) client.SetEndpoints([]string{}...) client.SetEndpoints(usedEps...) } @@ -296,7 +296,7 @@ func CreateEtcdClient(tlsConfig *tls.Config, acURLs []url.URL) (*clientv3.Client client.SetEndpoints(healthyEps...) 
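Editor's note: the recovery digest now also lists newly created empty regions; the hunk builds the line with Sprintf plus strings.Trim. The sketch below renders the same information with sorted keys and strings.Join for deterministic output; it is an alternative rendering, not the PD code.

    package main

    import (
    	"fmt"
    	"sort"
    	"strings"
    )

    // digestLine renders a set of region IDs as a single readable detail line,
    // the same information the new "newly created empty regions" entry carries.
    func digestLine(created map[uint64]struct{}) string {
    	if len(created) == 0 {
    		return "no newly created empty regions"
    	}
    	ids := make([]string, 0, len(created))
    	for id := range created {
    		ids = append(ids, fmt.Sprintf("%d", id))
    	}
    	sort.Strings(ids) // deterministic, unlike ranging over the map directly
    	return "newly created empty regions: " + strings.Join(ids, ", ")
    }

    func main() {
    	fmt.Println(digestLine(map[uint64]struct{}{42: {}, 7: {}}))
    	fmt.Println(digestLine(nil))
    }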
change := fmt.Sprintf("%d->%d", len(usedEps), len(healthyEps)) etcdStateGauge.WithLabelValues("endpoints").Set(float64(len(healthyEps))) - log.Info("[etcd client] update endpoints", zap.String("num-change", change), + log.Info("update endpoints", zap.String("num-change", change), zap.Strings("last-endpoints", usedEps), zap.Strings("endpoints", client.Endpoints())) } lastAvailable = time.Now() @@ -313,7 +313,7 @@ func CreateEtcdClient(tlsConfig *tls.Config, acURLs []url.URL) (*clientv3.Client for { select { case <-client.Ctx().Done(): - log.Info("[etcd client] etcd client is closed, exit update endpoint goroutine") + log.Info("etcd client is closed, exit update endpoint goroutine") return case <-ticker.C: eps := syncUrls(client) @@ -377,7 +377,7 @@ func (checker *healthyChecker) update(eps []string) { if client, ok := checker.Load(ep); ok { lastHealthy := client.(*healthyClient).lastHealth if time.Since(lastHealthy) > etcdServerOfflineTimeout { - log.Info("[etcd client] some etcd server maybe offline", zap.String("endpoint", ep)) + log.Info("some etcd server maybe offline", zap.String("endpoint", ep)) checker.Delete(ep) } if time.Since(lastHealthy) > etcdServerDisconnectedTimeout { @@ -394,7 +394,7 @@ func (checker *healthyChecker) update(eps []string) { func (checker *healthyChecker) addClient(ep string, lastHealth time.Time) { client, err := newClient(checker.tlsConfig, ep) if err != nil { - log.Error("[etcd client] failed to create etcd healthy client", zap.Error(err)) + log.Error("failed to create etcd healthy client", zap.Error(err)) return } checker.Store(ep, &healthyClient{ @@ -409,7 +409,7 @@ func syncUrls(client *clientv3.Client) []string { defer cancel() mresp, err := client.MemberList(ctx) if err != nil { - log.Error("[etcd client] failed to list members", errs.ZapError(err)) + log.Error("failed to list members", errs.ZapError(err)) return []string{} } var eps []string @@ -433,12 +433,16 @@ func CreateClients(tlsConfig *tls.Config, acUrls []url.URL) (*clientv3.Client, * // createHTTPClient creates a http client with the given tls config. func createHTTPClient(tlsConfig *tls.Config) *http.Client { - return &http.Client{ - Transport: &http.Transport{ - DisableKeepAlives: true, - TLSClientConfig: tlsConfig, - }, + // FIXME: Currently, there is no timeout set for certain requests, such as GetRegions, + // which may take a significant amount of time. However, it might be necessary to + // define an appropriate timeout in the future. + cli := &http.Client{} + if tlsConfig != nil { + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConfig + cli.Transport = transport } + return cli } // InitClusterID creates a cluster ID for the given key if it hasn't existed. @@ -566,8 +570,13 @@ type LoopWatcher struct { } // NewLoopWatcher creates a new LoopWatcher. 
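Editor's note: createHTTPClient now clones http.DefaultTransport and only overrides TLSClientConfig, keeping the default keep-alive, proxy, and pooling behaviour instead of the old keep-alive-disabled transport. A minimal sketch of that construction:

    package main

    import (
    	"crypto/tls"
    	"net/http"
    )

    // newHTTPClient mirrors the new createHTTPClient shape: start from the
    // default transport and only attach TLS config when one is supplied. As in
    // the hunk above, no timeout is set here; callers are expected to bound
    // their requests themselves.
    func newHTTPClient(tlsConfig *tls.Config) *http.Client {
    	cli := &http.Client{}
    	if tlsConfig != nil {
    		transport := http.DefaultTransport.(*http.Transport).Clone()
    		transport.TLSClientConfig = tlsConfig
    		cli.Transport = transport
    	}
    	return cli
    }

    func main() {
    	_ = newHTTPClient(nil)                                       // plain client, default transport
    	_ = newHTTPClient(&tls.Config{MinVersion: tls.VersionTLS12}) // TLS-enabled client
    }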
-func NewLoopWatcher(ctx context.Context, wg *sync.WaitGroup, client *clientv3.Client, name, key string, - putFn, deleteFn func(*mvccpb.KeyValue) error, postEventFn func() error, opts ...clientv3.OpOption) *LoopWatcher { +func NewLoopWatcher( + ctx context.Context, wg *sync.WaitGroup, + client *clientv3.Client, + name, key string, + putFn, deleteFn func(*mvccpb.KeyValue) error, postEventFn func() error, + opts ...clientv3.OpOption, +) *LoopWatcher { return &LoopWatcher{ ctx: ctx, client: client, diff --git a/pkg/utils/grpcutil/grpcutil_test.go b/pkg/utils/grpcutil/grpcutil_test.go index ea1de4fb681..21b7e1a4acb 100644 --- a/pkg/utils/grpcutil/grpcutil_test.go +++ b/pkg/utils/grpcutil/grpcutil_test.go @@ -2,6 +2,8 @@ package grpcutil import ( "os" + "os/exec" + "path" "testing" "github.com/pingcap/errors" @@ -9,6 +11,11 @@ import ( "github.com/tikv/pd/pkg/errs" ) +var ( + certPath = "../../../tests/integrations/client/" + certScript = "cert_opt.sh" +) + func loadTLSContent(re *require.Assertions, caPath, certPath, keyPath string) (caData, certData, keyData []byte) { var err error caData, err = os.ReadFile(caPath) @@ -21,12 +28,21 @@ func loadTLSContent(re *require.Assertions, caPath, certPath, keyPath string) (c } func TestToTLSConfig(t *testing.T) { + if err := exec.Command(certPath+certScript, "generate", certPath).Run(); err != nil { + t.Fatal(err) + } + defer func() { + if err := exec.Command(certPath+certScript, "cleanup", certPath).Run(); err != nil { + t.Fatal(err) + } + }() + t.Parallel() re := require.New(t) tlsConfig := TLSConfig{ - KeyPath: "../../../tests/integrations/client/cert/pd-server-key.pem", - CertPath: "../../../tests/integrations/client/cert/pd-server.pem", - CAPath: "../../../tests/integrations/client/cert/ca.pem", + KeyPath: path.Join(certPath, "pd-server-key.pem"), + CertPath: path.Join(certPath, "pd-server.pem"), + CAPath: path.Join(certPath, "ca.pem"), } // test without bytes _, err := tlsConfig.ToTLSConfig() diff --git a/pkg/utils/testutil/api_check.go b/pkg/utils/testutil/api_check.go index c17c6970ab7..fcc445b7e7a 100644 --- a/pkg/utils/testutil/api_check.go +++ b/pkg/utils/testutil/api_check.go @@ -25,8 +25,8 @@ import ( // Status is used to check whether http response code is equal given code func Status(re *require.Assertions, code int) func([]byte, int) { - return func(_ []byte, i int) { - re.Equal(code, i) + return func(resp []byte, i int) { + re.Equal(code, i, "resp: "+string(resp)) } } diff --git a/server/api/hot_status.go b/server/api/hot_status.go index 749a371300a..4f64f1bebc5 100644 --- a/server/api/hot_status.go +++ b/server/api/hot_status.go @@ -22,8 +22,8 @@ import ( "strconv" "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/server" "github.com/unrolled/render" @@ -53,10 +53,10 @@ func convert(buckets *buckets.BucketStat) *HotBucketsItem { StartKey: core.HexRegionKeyStr(buckets.StartKey), EndKey: core.HexRegionKeyStr(buckets.EndKey), HotDegree: buckets.HotDegree, - ReadBytes: buckets.Loads[statistics.RegionReadBytes], - ReadKeys: buckets.Loads[statistics.RegionReadKeys], - WriteBytes: buckets.Loads[statistics.RegionWriteBytes], - WriteKeys: buckets.Loads[statistics.RegionWriteKeys], + ReadBytes: buckets.Loads[utils.RegionReadBytes], + ReadKeys: buckets.Loads[utils.RegionReadKeys], + WriteBytes: buckets.Loads[utils.RegionWriteBytes], + WriteKeys: buckets.Loads[utils.RegionWriteKeys], } } @@ 
-182,16 +182,16 @@ func (h *hotStatusHandler) GetHotStores(w http.ResponseWriter, r *http.Request) id := store.GetID() if loads, ok := storesLoads[id]; ok { if store.IsTiFlash() { - stats.BytesWriteStats[id] = loads[statistics.StoreRegionsWriteBytes] - stats.KeysWriteStats[id] = loads[statistics.StoreRegionsWriteKeys] + stats.BytesWriteStats[id] = loads[utils.StoreRegionsWriteBytes] + stats.KeysWriteStats[id] = loads[utils.StoreRegionsWriteKeys] } else { - stats.BytesWriteStats[id] = loads[statistics.StoreWriteBytes] - stats.KeysWriteStats[id] = loads[statistics.StoreWriteKeys] + stats.BytesWriteStats[id] = loads[utils.StoreWriteBytes] + stats.KeysWriteStats[id] = loads[utils.StoreWriteKeys] } - stats.BytesReadStats[id] = loads[statistics.StoreReadBytes] - stats.KeysReadStats[id] = loads[statistics.StoreReadKeys] - stats.QueryWriteStats[id] = loads[statistics.StoreWriteQuery] - stats.QueryReadStats[id] = loads[statistics.StoreReadQuery] + stats.BytesReadStats[id] = loads[utils.StoreReadBytes] + stats.KeysReadStats[id] = loads[utils.StoreReadKeys] + stats.QueryWriteStats[id] = loads[utils.StoreWriteQuery] + stats.QueryReadStats[id] = loads[utils.StoreReadQuery] } } h.rd.JSON(w, http.StatusOK, stats) diff --git a/server/api/router.go b/server/api/router.go index 1e0d12d53b6..ea99e82cdd8 100644 --- a/server/api/router.go +++ b/server/api/router.go @@ -290,7 +290,7 @@ func createRouter(prefix string, svr *server.Server) *mux.Router { registerFunc(apiRouter, "/leader/transfer/{next_leader}", leaderHandler.TransferLeader, setMethods(http.MethodPost), setAuditBackend(localLog, prometheus)) statsHandler := newStatsHandler(svr, rd) - registerFunc(clusterRouter, "/stats/region", statsHandler.GetRegionStatus, setMethods(http.MethodGet), setAuditBackend(localLog, prometheus)) + registerFunc(clusterRouter, "/stats/region", statsHandler.GetRegionStatus, setMethods(http.MethodGet), setAuditBackend(prometheus)) trendHandler := newTrendHandler(svr, rd) registerFunc(apiRouter, "/trend", trendHandler.GetTrend, setMethods(http.MethodGet), setAuditBackend(prometheus)) diff --git a/server/api/scheduler_test.go b/server/api/scheduler_test.go index 026d7a3cd2f..429026ec502 100644 --- a/server/api/scheduler_test.go +++ b/server/api/scheduler_test.go @@ -52,6 +52,8 @@ func (suite *scheduleTestSuite) SetupSuite() { mustBootstrapCluster(re, suite.svr) mustPutStore(re, suite.svr, 1, metapb.StoreState_Up, metapb.NodeState_Serving, nil) mustPutStore(re, suite.svr, 2, metapb.StoreState_Up, metapb.NodeState_Serving, nil) + mustPutStore(re, suite.svr, 3, metapb.StoreState_Up, metapb.NodeState_Serving, nil) + mustPutStore(re, suite.svr, 4, metapb.StoreState_Up, metapb.NodeState_Serving, nil) } func (suite *scheduleTestSuite) TearDownSuite() { @@ -126,7 +128,8 @@ func (suite *scheduleTestSuite) TestAPI() { extraTestFunc func(name string) }{ { - name: "balance-leader-scheduler", + name: "balance-leader-scheduler", + createdName: "balance-leader-scheduler", extraTestFunc: func(name string) { resp := make(map[string]interface{}) listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) @@ -175,7 +178,8 @@ func (suite *scheduleTestSuite) TestAPI() { }, }, { - name: "balance-hot-region-scheduler", + name: "balance-hot-region-scheduler", + createdName: "balance-hot-region-scheduler", extraTestFunc: func(name string) { resp := make(map[string]interface{}) listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) @@ 
-214,12 +218,25 @@ func (suite *scheduleTestSuite) TestAPI() { suite.NoError(err) }, }, - {name: "balance-region-scheduler"}, - {name: "shuffle-leader-scheduler"}, - {name: "shuffle-region-scheduler"}, - {name: "transfer-witness-leader-scheduler"}, { - name: "balance-witness-scheduler", + name: "balance-region-scheduler", + createdName: "balance-region-scheduler", + }, + { + name: "shuffle-leader-scheduler", + createdName: "shuffle-leader-scheduler", + }, + { + name: "shuffle-region-scheduler", + createdName: "shuffle-region-scheduler", + }, + { + name: "transfer-witness-leader-scheduler", + createdName: "transfer-witness-leader-scheduler", + }, + { + name: "balance-witness-scheduler", + createdName: "balance-witness-scheduler", extraTestFunc: func(name string) { resp := make(map[string]interface{}) listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) @@ -333,36 +350,36 @@ func (suite *scheduleTestSuite) TestAPI() { { name: "evict-leader-scheduler", createdName: "evict-leader-scheduler", - args: []arg{{"store_id", 1}}, + args: []arg{{"store_id", 3}}, // Test the scheduler config handler. extraTestFunc: func(name string) { resp := make(map[string]interface{}) listURL := fmt.Sprintf("%s%s%s/%s/list", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) exceptMap := make(map[string]interface{}) - exceptMap["1"] = []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}} + exceptMap["3"] = []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}} suite.Equal(exceptMap, resp["store-id-ranges"]) // using /pd/v1/schedule-config/evict-leader-scheduler/config to add new store to evict-leader-scheduler input := make(map[string]interface{}) input["name"] = "evict-leader-scheduler" - input["store_id"] = 2 + input["store_id"] = 4 updateURL := fmt.Sprintf("%s%s%s/%s/config", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name) body, err := json.Marshal(input) suite.NoError(err) suite.NoError(tu.CheckPostJSON(testDialClient, updateURL, body, tu.StatusOK(re))) resp = make(map[string]interface{}) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) - exceptMap["2"] = []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}} + exceptMap["4"] = []interface{}{map[string]interface{}{"end-key": "", "start-key": ""}} suite.Equal(exceptMap, resp["store-id-ranges"]) // using /pd/v1/schedule-config/evict-leader-scheduler/config to delete exist store from evict-leader-scheduler - deleteURL := fmt.Sprintf("%s%s%s/%s/delete/%s", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name, "2") + deleteURL := fmt.Sprintf("%s%s%s/%s/delete/%s", suite.svr.GetAddr(), apiPrefix, server.SchedulerConfigHandlerPath, name, "4") _, err = apiutil.DoDelete(testDialClient, deleteURL) suite.NoError(err) resp = make(map[string]interface{}) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) - delete(exceptMap, "2") + delete(exceptMap, "4") suite.Equal(exceptMap, resp["store-id-ranges"]) statusCode, err := apiutil.DoDelete(testDialClient, deleteURL) suite.NoError(err) @@ -379,12 +396,15 @@ func (suite *scheduleTestSuite) TestAPI() { body, err := json.Marshal(input) suite.NoError(err) suite.testPauseOrResume(testCase.name, testCase.createdName, body) + if testCase.extraTestFunc != nil { + testCase.extraTestFunc(testCase.createdName) + } + suite.deleteScheduler(testCase.createdName) } // test pause 
and resume all schedulers. // add schedulers. - testCases = testCases[:3] for _, testCase := range testCases { input := make(map[string]interface{}) input["name"] = testCase.name @@ -394,6 +414,9 @@ func (suite *scheduleTestSuite) TestAPI() { body, err := json.Marshal(input) suite.NoError(err) suite.addScheduler(body) + if testCase.extraTestFunc != nil { + testCase.extraTestFunc(testCase.createdName) + } } // test pause all schedulers. @@ -565,5 +588,4 @@ func (suite *scheduleTestSuite) testPauseOrResume(name, createdName string, body isPaused, err = handler.IsSchedulerPaused(createdName) suite.NoError(err) suite.False(isPaused) - suite.deleteScheduler(createdName) } diff --git a/server/apiv2/handlers/tso_keyspace_group.go b/server/apiv2/handlers/tso_keyspace_group.go index fc7ef680104..d16cd333e87 100644 --- a/server/apiv2/handlers/tso_keyspace_group.go +++ b/server/apiv2/handlers/tso_keyspace_group.go @@ -243,14 +243,20 @@ func SplitKeyspaceGroupByID(c *gin.Context) { } svr := c.MustGet(middlewares.ServerContextKey).(*server.Server) + manager := svr.GetKeyspaceManager() + if manager == nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, managerUninitializedErr) + return + } + groupManager := svr.GetKeyspaceGroupManager() + if groupManager == nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, groupManagerUninitializedErr) + return + } + patrolKeyspaceAssignmentState.Lock() if !patrolKeyspaceAssignmentState.patrolled { // Patrol keyspace assignment before splitting keyspace group. - manager := svr.GetKeyspaceManager() - if manager == nil { - c.AbortWithStatusJSON(http.StatusInternalServerError, managerUninitializedErr) - return - } err = manager.PatrolKeyspaceAssignment(splitParams.StartKeyspaceID, splitParams.EndKeyspaceID) if err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) @@ -260,11 +266,7 @@ func SplitKeyspaceGroupByID(c *gin.Context) { patrolKeyspaceAssignmentState.patrolled = true } patrolKeyspaceAssignmentState.Unlock() - groupManager := svr.GetKeyspaceGroupManager() - if groupManager == nil { - c.AbortWithStatusJSON(http.StatusInternalServerError, groupManagerUninitializedErr) - return - } + // Split keyspace group. 
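The keyspace-group handlers above now validate every manager before taking the patrol lock or doing any work. A hypothetical handler sketch of that fail-fast guard (the types, message, and signature here are illustrative, not PD's real API):

```go
package sketch

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// GroupManager is a stand-in type for the sketch.
type GroupManager struct{}

// finishSplit resolves its dependency first and aborts with a 500 if the
// server has not initialized it, instead of dereferencing a nil manager later.
func finishSplit(c *gin.Context, getManager func() *GroupManager) {
	manager := getManager()
	if manager == nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, "keyspace group manager is not initialized")
		return
	}
	// ... proceed with the finish-split logic ...
	c.JSON(http.StatusOK, "ok")
}
```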
err = groupManager.SplitKeyspaceGroupByID( id, splitParams.NewID, @@ -286,6 +288,10 @@ func FinishSplitKeyspaceByID(c *gin.Context) { svr := c.MustGet(middlewares.ServerContextKey).(*server.Server) manager := svr.GetKeyspaceGroupManager() + if manager == nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, groupManagerUninitializedErr) + return + } err = manager.FinishSplitKeyspaceByID(id) if err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) @@ -357,6 +363,10 @@ func FinishMergeKeyspaceByID(c *gin.Context) { svr := c.MustGet(middlewares.ServerContextKey).(*server.Server) manager := svr.GetKeyspaceGroupManager() + if manager == nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, groupManagerUninitializedErr) + return + } err = manager.FinishMergeKeyspaceByID(id) if err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index b2ad25cf0ca..06de6f9a56e 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -15,8 +15,11 @@ package cluster import ( + "bytes" "context" + "encoding/json" "fmt" + "io" "math" "net/http" "strconv" @@ -53,6 +56,7 @@ import ( "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/syncer" @@ -139,12 +143,11 @@ type RaftCluster struct { etcdClient *clientv3.Client httpClient *http.Client - running bool - meta *metapb.Cluster - storeConfigManager *config.StoreConfigManager - storage storage.Storage - minResolvedTS uint64 - externalTS uint64 + running bool + meta *metapb.Cluster + storage storage.Storage + minResolvedTS uint64 + externalTS uint64 // Keep the previous store limit settings when removing a store. prevStoreLimit map[uint64]map[storelimit.Type]float64 @@ -159,7 +162,6 @@ type RaftCluster struct { labelLevelStats *statistics.LabelStatistics regionStats *statistics.RegionStatistics hotStat *statistics.HotStat - hotBuckets *buckets.HotBucketCache slowStat *statistics.SlowStat ruleManager *placement.RuleManager regionLabeler *labeler.RegionLabeler @@ -193,7 +195,7 @@ func NewRaftCluster(ctx context.Context, clusterID uint64, regionSyncer *syncer. // GetStoreConfig returns the store config. func (c *RaftCluster) GetStoreConfig() sc.StoreConfigProvider { - return c.storeConfigManager.GetStoreConfig() + return c.GetOpts() } // GetCheckerConfig returns the checker config. @@ -259,15 +261,14 @@ func (c *RaftCluster) loadBootstrapTime() (time.Time, error) { // InitCluster initializes the raft cluster. 
func (c *RaftCluster) InitCluster( id id.Allocator, - opt *config.PersistOptions, + opt sc.ConfProvider, storage storage.Storage, basicCluster *core.BasicCluster, keyspaceGroupManager *keyspace.GroupManager) { - c.core, c.opt, c.storage, c.id = basicCluster, opt, storage, id + c.core, c.opt, c.storage, c.id = basicCluster, opt.(*config.PersistOptions), storage, id c.ctx, c.cancel = context.WithCancel(c.serverCtx) c.labelLevelStats = statistics.NewLabelStatistics() c.hotStat = statistics.NewHotStat(c.ctx) - c.hotBuckets = buckets.NewBucketsCache(c.ctx) c.slowStat = statistics.NewSlowStat(c.ctx) c.progressManager = progress.NewManager() c.changedRegions = make(chan *core.RegionInfo, defaultChangedRegionsLimit) @@ -318,9 +319,8 @@ func (c *RaftCluster) Start(s Server) error { if err != nil { return err } - c.storeConfigManager = config.NewStoreConfigManager(c.httpClient) c.coordinator = schedule.NewCoordinator(c.ctx, cluster, s.GetHBStreams()) - c.regionStats = statistics.NewRegionStatistics(c.core, c.opt, c.ruleManager, c.storeConfigManager) + c.regionStats = statistics.NewRegionStatistics(c.core, c.opt, c.ruleManager) c.limiter = NewStoreLimiter(s.GetPersistOptions()) c.externalTS, err = c.storage.LoadExternalTS() if err != nil { @@ -335,7 +335,7 @@ func (c *RaftCluster) Start(s Server) error { go c.syncRegions() go c.runReplicationMode() go c.runMinResolvedTSJob() - go c.runSyncConfig() + go c.runStoreConfigSync() go c.runUpdateStoreStats() go c.startGCTuner() @@ -412,44 +412,50 @@ func (c *RaftCluster) startGCTuner() { } } -// runSyncConfig runs the job to sync tikv config. -func (c *RaftCluster) runSyncConfig() { +// runStoreConfigSync runs the job to sync the store config from TiKV. +func (c *RaftCluster) runStoreConfigSync() { defer logutil.LogPanic() defer c.wg.Done() + + var ( + synced, switchRaftV2Config bool + stores = c.GetStores() + ) ticker := time.NewTicker(time.Minute) defer ticker.Stop() - - stores := c.GetStores() - syncFunc := func() { - synced, switchRaftV2Config := syncConfig(c.storeConfigManager, stores) + for { + synced, switchRaftV2Config = c.syncStoreConfig(stores) if switchRaftV2Config { - c.GetOpts().UseRaftV2() if err := c.opt.Persist(c.GetStorage()); err != nil { log.Warn("store config persisted failed", zap.Error(err)) } } + // Update the stores if the synchronization is not completed. if !synced { stores = c.GetStores() + } else if err := c.opt.Persist(c.storage); err != nil { + log.Warn("store config persisted failed", zap.Error(err)) } - } - - syncFunc() - for { select { case <-c.ctx.Done(): log.Info("sync store config job is stopped") return case <-ticker.C: - syncFunc() } } } -// syncConfig syncs the config of the stores. -// synced is true if sync config from one tikv. -// switchRaftV2 is true if the config of tikv engine is changed and engine is raft-kv2. -func syncConfig(manager *config.StoreConfigManager, stores []*core.StoreInfo) (synced bool, switchRaftV2 bool) { +// syncStoreConfig syncs the store config from TiKV. +// - `synced` is true if sync config from one tikv. +// - `switchRaftV2` is true if the config of tikv engine is change to raft-kv2. 
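The rewritten `runStoreConfigSync` above is a plain ticker loop that exits on context cancellation and refreshes its store list only while synchronization is incomplete. A self-contained sketch of that loop shape, with the sync step stubbed out:

```go
package sketch

import (
	"context"
	"time"
)

// runPeriodicSync calls syncOnce every interval until ctx is canceled.
// refresh is invoked only when the previous attempt did not fully succeed.
func runPeriodicSync(ctx context.Context, interval time.Duration, syncOnce func() bool, refresh func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if !syncOnce() {
			refresh() // e.g. reload the store list before the next attempt
		}
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
	}
}
```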
+func (c *RaftCluster) syncStoreConfig(stores []*core.StoreInfo) (synced bool, switchRaftV2 bool) { for index := 0; index < len(stores); index++ { + select { + case <-c.ctx.Done(): + log.Info("stop sync store config job due to server shutdown") + return + default: + } // filter out the stores that are tiflash store := stores[index] if store.IsTiFlash() { @@ -462,8 +468,11 @@ func syncConfig(manager *config.StoreConfigManager, stores []*core.StoreInfo) (s } // it will try next store if the current store is failed. address := netutil.ResolveLoopBackAddr(stores[index].GetStatusAddress(), stores[index].GetAddress()) - switchRaftV2, err := manager.ObserveConfig(address) + switchRaftV2, err := c.observeStoreConfig(c.ctx, address) if err != nil { + // delete the store if it is failed and retry next store. + stores = append(stores[:index], stores[index+1:]...) + index-- storeSyncConfigEvent.WithLabelValues(address, "fail").Inc() log.Debug("sync store config failed, it will try next store", zap.Error(err)) continue @@ -477,6 +486,73 @@ func syncConfig(manager *config.StoreConfigManager, stores []*core.StoreInfo) (s return false, false } +// observeStoreConfig is used to observe the store config changes and +// return whether if the new config changes the engine to raft-kv2. +func (c *RaftCluster) observeStoreConfig(ctx context.Context, address string) (bool, error) { + cfg, err := c.fetchStoreConfigFromTiKV(ctx, address) + if err != nil { + return false, err + } + oldCfg := c.opt.GetStoreConfig() + if cfg == nil || oldCfg.Equal(cfg) { + return false, nil + } + log.Info("sync the store config successful", + zap.String("store-address", address), + zap.String("store-config", cfg.String()), + zap.String("old-config", oldCfg.String())) + return c.updateStoreConfig(oldCfg, cfg) +} + +// updateStoreConfig updates the store config. This is extracted for testing. +func (c *RaftCluster) updateStoreConfig(oldCfg, cfg *sc.StoreConfig) (bool, error) { + cfg.Adjust() + c.opt.SetStoreConfig(cfg) + return oldCfg.Storage.Engine != sc.RaftstoreV2 && cfg.Storage.Engine == sc.RaftstoreV2, nil +} + +// fetchStoreConfigFromTiKV tries to fetch the config from the TiKV store URL. +func (c *RaftCluster) fetchStoreConfigFromTiKV(ctx context.Context, statusAddress string) (*sc.StoreConfig, error) { + cfg := &sc.StoreConfig{} + failpoint.Inject("mockFetchStoreConfigFromTiKV", func(val failpoint.Value) { + if regionMaxSize, ok := val.(string); ok { + cfg.RegionMaxSize = regionMaxSize + cfg.Storage.Engine = sc.RaftstoreV2 + } + failpoint.Return(cfg, nil) + }) + if c.httpClient == nil { + return nil, fmt.Errorf("failed to get store config due to nil client") + } + var url string + if netutil.IsEnableHTTPS(c.httpClient) { + url = fmt.Sprintf("%s://%s/config", "https", statusAddress) + } else { + url = fmt.Sprintf("%s://%s/config", "http", statusAddress) + } + ctx, cancel := context.WithTimeout(ctx, clientTimeout) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, bytes.NewBuffer(nil)) + if err != nil { + cancel() + return nil, fmt.Errorf("failed to create store config http request: %w", err) + } + resp, err := c.httpClient.Do(req) + if err != nil { + cancel() + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + cancel() + if err != nil { + return nil, err + } + if err := json.Unmarshal(body, cfg); err != nil { + return nil, err + } + return cfg, nil +} + // LoadClusterInfo loads cluster related info. 
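`fetchStoreConfigFromTiKV` above is essentially a bounded HTTP GET that decodes a JSON document. A minimal standalone version of that flow, using only the standard library (the URL scheme and timeout handling are illustrative):

```go
package sketch

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)

// fetchJSONConfig fetches url and decodes the body into out, giving up after
// timeout so a stuck store cannot block the caller indefinitely.
func fetchJSONConfig(ctx context.Context, client *http.Client, url string, timeout time.Duration, out interface{}) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	return json.Unmarshal(body, out)
}
```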
func (c *RaftCluster) LoadClusterInfo() (*RaftCluster, error) { c.meta = &metapb.Cluster{} @@ -759,11 +835,6 @@ func (c *RaftCluster) GetOpts() sc.ConfProvider { return c.opt } -// GetPersistOptions returns cluster's configuration. -func (c *RaftCluster) GetPersistOptions() *config.PersistOptions { - return c.opt -} - // GetScheduleConfig returns scheduling configurations. func (c *RaftCluster) GetScheduleConfig() *sc.ScheduleConfig { return c.opt.GetScheduleConfig() @@ -912,12 +983,12 @@ func (c *RaftCluster) HandleStoreHeartbeat(heartbeat *pdpb.StoreHeartbeatRequest } readQueryNum := core.GetReadQueryNum(peerStat.GetQueryStats()) loads := []float64{ - statistics.RegionReadBytes: float64(peerStat.GetReadBytes()), - statistics.RegionReadKeys: float64(peerStat.GetReadKeys()), - statistics.RegionReadQueryNum: float64(readQueryNum), - statistics.RegionWriteBytes: 0, - statistics.RegionWriteKeys: 0, - statistics.RegionWriteQueryNum: 0, + utils.RegionReadBytes: float64(peerStat.GetReadBytes()), + utils.RegionReadKeys: float64(peerStat.GetReadKeys()), + utils.RegionReadQueryNum: float64(readQueryNum), + utils.RegionWriteBytes: 0, + utils.RegionWriteKeys: 0, + utils.RegionWriteQueryNum: 0, } peerInfo := core.NewPeerInfo(peer, loads, interval) c.hotStat.CheckReadAsync(statistics.NewCheckPeerTask(peerInfo, region)) @@ -987,7 +1058,7 @@ func (c *RaftCluster) processRegionHeartbeat(region *core.RegionInfo) error { if err != nil { return err } - region.Inherit(origin, c.storeConfigManager.GetStoreConfig().IsEnableRegionBucket()) + region.Inherit(origin, c.GetStoreConfig().IsEnableRegionBucket()) c.hotStat.CheckWriteAsync(statistics.NewCheckExpiredItemTask(region)) c.hotStat.CheckReadAsync(statistics.NewCheckExpiredItemTask(region)) @@ -1000,7 +1071,7 @@ func (c *RaftCluster) processRegionHeartbeat(region *core.RegionInfo) error { c.coordinator.GetSchedulersController().CheckTransferWitnessLeader(region) hasRegionStats := c.regionStats != nil - // Save to storage if meta is updated. + // Save to storage if meta is updated, except for flashback. // Save to cache if meta or leader is updated, or contains any down/pending peer. // Mark isNew if the region in cache does not have leader. isNew, saveKV, saveCache, needSync := regionGuide(region, origin) @@ -2046,7 +2117,7 @@ func (c *RaftCluster) deleteStore(store *core.StoreInfo) error { } func (c *RaftCluster) collectMetrics() { - statsMap := statistics.NewStoreStatisticsMap(c.opt, c.storeConfigManager.GetStoreConfig()) + statsMap := statistics.NewStoreStatisticsMap(c.opt) stores := c.GetStores() for _, s := range stores { statsMap.Observe(s, c.hotStat.StoresStats) @@ -2060,7 +2131,7 @@ func (c *RaftCluster) collectMetrics() { } func (c *RaftCluster) resetMetrics() { - statsMap := statistics.NewStoreStatisticsMap(c.opt, c.storeConfigManager.GetStoreConfig()) + statsMap := statistics.NewStoreStatisticsMap(c.opt) statsMap.Reset() c.coordinator.GetSchedulersController().ResetSchedulerMetrics() @@ -2243,7 +2314,7 @@ func (c *RaftCluster) IsRegionHot(region *core.RegionInfo) bool { } // GetHotPeerStat returns hot peer stat with specified regionID and storeID. 
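The store heartbeat handler above builds its load vector with indexed slice elements: each statistic lands at its dimension constant and the write dimensions stay zero. A small illustration of that Go construct (the constants are illustrative stand-ins for the ones in `pkg/statistics/utils`):

```go
package main

import "fmt"

const (
	RegionReadBytes = iota
	RegionReadKeys
	RegionReadQueryNum
	RegionWriteBytes
	RegionWriteKeys
	RegionWriteQueryNum
)

func main() {
	loads := []float64{
		RegionReadBytes:     1024,
		RegionReadKeys:      16,
		RegionReadQueryNum:  4,
		RegionWriteQueryNum: 0, // the highest index fixes the slice length
	}
	fmt.Println(len(loads), loads) // 6 [1024 16 4 0 0 0]
}
```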
-func (c *RaftCluster) GetHotPeerStat(rw statistics.RWType, regionID, storeID uint64) *statistics.HotPeerStat { +func (c *RaftCluster) GetHotPeerStat(rw utils.RWType, regionID, storeID uint64) *statistics.HotPeerStat { return c.hotStat.GetHotPeerStat(rw, regionID, storeID) } @@ -2253,24 +2324,20 @@ func (c *RaftCluster) GetHotPeerStat(rw statistics.RWType, regionID, storeID uin func (c *RaftCluster) RegionReadStats() map[uint64][]*statistics.HotPeerStat { // As read stats are reported by store heartbeat, the threshold needs to be adjusted. threshold := c.GetOpts().GetHotRegionCacheHitsThreshold() * - (statistics.RegionHeartBeatReportInterval / statistics.StoreHeartBeatReportInterval) - return c.hotStat.RegionStats(statistics.Read, threshold) -} - -// BucketsStats returns hot region's buckets stats. -func (c *RaftCluster) BucketsStats(degree int, regionIDs ...uint64) map[uint64][]*buckets.BucketStat { - task := buckets.NewCollectBucketStatsTask(degree, regionIDs...) - if !c.hotBuckets.CheckAsync(task) { - return nil - } - return task.WaitRet(c.ctx) + (utils.RegionHeartBeatReportInterval / utils.StoreHeartBeatReportInterval) + return c.hotStat.RegionStats(utils.Read, threshold) } // RegionWriteStats returns hot region's write stats. // The result only includes peers that are hot enough. func (c *RaftCluster) RegionWriteStats() map[uint64][]*statistics.HotPeerStat { // RegionStats is a thread-safe method - return c.hotStat.RegionStats(statistics.Write, c.GetOpts().GetHotRegionCacheHitsThreshold()) + return c.hotStat.RegionStats(utils.Write, c.GetOpts().GetHotRegionCacheHitsThreshold()) +} + +// BucketsStats returns hot region's buckets stats. +func (c *RaftCluster) BucketsStats(degree int, regionIDs ...uint64) map[uint64][]*buckets.BucketStat { + return c.hotStat.BucketsStats(degree, regionIDs...) } // TODO: remove me. @@ -2287,7 +2354,7 @@ func (c *RaftCluster) putRegion(region *core.RegionInfo) error { // GetHotWriteRegions gets hot write regions' info. func (c *RaftCluster) GetHotWriteRegions(storeIDs ...uint64) *statistics.StoreHotPeersInfos { - hotWriteRegions := c.coordinator.GetHotRegionsByType(statistics.Write) + hotWriteRegions := c.coordinator.GetHotRegionsByType(utils.Write) if len(storeIDs) > 0 && hotWriteRegions != nil { hotWriteRegions = getHotRegionsByStoreIDs(hotWriteRegions, storeIDs...) } @@ -2296,7 +2363,7 @@ func (c *RaftCluster) GetHotWriteRegions(storeIDs ...uint64) *statistics.StoreHo // GetHotReadRegions gets hot read regions' info. func (c *RaftCluster) GetHotReadRegions(storeIDs ...uint64) *statistics.StoreHotPeersInfos { - hotReadRegions := c.coordinator.GetHotRegionsByType(statistics.Read) + hotReadRegions := c.coordinator.GetHotRegionsByType(utils.Read) if len(storeIDs) > 0 && hotReadRegions != nil { hotReadRegions = getHotRegionsByStoreIDs(hotReadRegions, storeIDs...) 
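`RegionReadStats` above scales the cache-hits threshold because read flow arrives with the more frequent store heartbeats while the threshold is expressed in region-heartbeat terms. A toy version of that arithmetic, assuming the usual 60s region and 10s store heartbeat intervals:

```go
package main

import "fmt"

const (
	regionHeartBeatReportInterval = 60 // seconds, illustrative
	storeHeartBeatReportInterval  = 10 // seconds, illustrative
)

// readHitsThreshold converts a threshold defined per region heartbeat into an
// equivalent one for the more frequent store heartbeats.
func readHitsThreshold(base int) int {
	return base * (regionHeartBeatReportInterval / storeHeartBeatReportInterval)
}

func main() {
	fmt.Println(readHitsThreshold(3)) // 18
}
```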
} diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index 959dba6ee74..e5bf862174b 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -20,6 +20,8 @@ import ( "fmt" "math" "math/rand" + "net/http" + "net/http/httptest" "sync" "testing" "time" @@ -49,6 +51,7 @@ import ( "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/utils/testutil" @@ -150,23 +153,23 @@ func TestStoreHeartbeat(t *testing.T) { re.NoError(cluster.HandleStoreHeartbeat(hotReq, hotResp)) re.Equal("v1", cluster.GetStore(1).GetStoreLimit().Version()) time.Sleep(20 * time.Millisecond) - storeStats := cluster.hotStat.RegionStats(statistics.Read, 3) + storeStats := cluster.hotStat.RegionStats(utils.Read, 3) re.Len(storeStats[1], 1) re.Equal(uint64(1), storeStats[1][0].RegionID) interval := float64(hotHeartBeat.Interval.EndTimestamp - hotHeartBeat.Interval.StartTimestamp) - re.Len(storeStats[1][0].Loads, statistics.DimLen) - re.Equal(float64(hotHeartBeat.PeerStats[0].ReadBytes)/interval, storeStats[1][0].Loads[statistics.ByteDim]) - re.Equal(float64(hotHeartBeat.PeerStats[0].ReadKeys)/interval, storeStats[1][0].Loads[statistics.KeyDim]) - re.Equal(float64(hotHeartBeat.PeerStats[0].QueryStats.Get)/interval, storeStats[1][0].Loads[statistics.QueryDim]) + re.Len(storeStats[1][0].Loads, utils.DimLen) + re.Equal(float64(hotHeartBeat.PeerStats[0].ReadBytes)/interval, storeStats[1][0].Loads[utils.ByteDim]) + re.Equal(float64(hotHeartBeat.PeerStats[0].ReadKeys)/interval, storeStats[1][0].Loads[utils.KeyDim]) + re.Equal(float64(hotHeartBeat.PeerStats[0].QueryStats.Get)/interval, storeStats[1][0].Loads[utils.QueryDim]) // After cold heartbeat, we won't find region 1 peer in regionStats re.NoError(cluster.HandleStoreHeartbeat(coldReq, coldResp)) time.Sleep(20 * time.Millisecond) - storeStats = cluster.hotStat.RegionStats(statistics.Read, 1) + storeStats = cluster.hotStat.RegionStats(utils.Read, 1) re.Empty(storeStats[1]) // After hot heartbeat, we can find region 1 peer again re.NoError(cluster.HandleStoreHeartbeat(hotReq, hotResp)) time.Sleep(20 * time.Millisecond) - storeStats = cluster.hotStat.RegionStats(statistics.Read, 3) + storeStats = cluster.hotStat.RegionStats(utils.Read, 3) re.Len(storeStats[1], 1) re.Equal(uint64(1), storeStats[1][0].RegionID) // after several cold heartbeats, and one hot heartbeat, we also can't find region 1 peer @@ -174,19 +177,19 @@ func TestStoreHeartbeat(t *testing.T) { re.NoError(cluster.HandleStoreHeartbeat(coldReq, coldResp)) re.NoError(cluster.HandleStoreHeartbeat(coldReq, coldResp)) time.Sleep(20 * time.Millisecond) - storeStats = cluster.hotStat.RegionStats(statistics.Read, 0) + storeStats = cluster.hotStat.RegionStats(utils.Read, 0) re.Empty(storeStats[1]) re.Nil(cluster.HandleStoreHeartbeat(hotReq, hotResp)) time.Sleep(20 * time.Millisecond) - storeStats = cluster.hotStat.RegionStats(statistics.Read, 1) + storeStats = cluster.hotStat.RegionStats(utils.Read, 1) re.Len(storeStats[1], 0) - storeStats = cluster.hotStat.RegionStats(statistics.Read, 3) + storeStats = cluster.hotStat.RegionStats(utils.Read, 3) re.Empty(storeStats[1]) // after 2 hot heartbeats, wo can find region 1 peer again re.NoError(cluster.HandleStoreHeartbeat(hotReq, hotResp)) re.NoError(cluster.HandleStoreHeartbeat(hotReq, hotResp)) time.Sleep(20 * 
time.Millisecond) - storeStats = cluster.hotStat.RegionStats(statistics.Read, 3) + storeStats = cluster.hotStat.RegionStats(utils.Read, 3) re.Len(storeStats[1], 1) re.Equal(uint64(1), storeStats[1][0].RegionID) } @@ -624,14 +627,14 @@ func TestRegionHeartbeatHotStat(t *testing.T) { EndKey: []byte{byte(1 + 1)}, RegionEpoch: &metapb.RegionEpoch{ConfVer: 2, Version: 2}, } - region := core.NewRegionInfo(regionMeta, leader, core.WithInterval(&pdpb.TimeInterval{StartTimestamp: 0, EndTimestamp: statistics.RegionHeartBeatReportInterval}), + region := core.NewRegionInfo(regionMeta, leader, core.WithInterval(&pdpb.TimeInterval{StartTimestamp: 0, EndTimestamp: utils.RegionHeartBeatReportInterval}), core.SetWrittenBytes(30000*10), core.SetWrittenKeys(300000*10)) err = cluster.processRegionHeartbeat(region) re.NoError(err) // wait HotStat to update items time.Sleep(time.Second) - stats := cluster.hotStat.RegionStats(statistics.Write, 0) + stats := cluster.hotStat.RegionStats(utils.Write, 0) re.Len(stats[1], 1) re.Len(stats[2], 1) re.Len(stats[3], 1) @@ -644,7 +647,7 @@ func TestRegionHeartbeatHotStat(t *testing.T) { re.NoError(err) // wait HotStat to update items time.Sleep(time.Second) - stats = cluster.hotStat.RegionStats(statistics.Write, 0) + stats = cluster.hotStat.RegionStats(utils.Write, 0) re.Len(stats[1], 1) re.Empty(stats[2]) re.Len(stats[3], 1) @@ -696,14 +699,12 @@ func TestBucketHeartbeat(t *testing.T) { // case5: region update should inherit buckets. newRegion := regions[1].Clone(core.WithIncConfVer(), core.SetBuckets(nil)) - cluster.storeConfigManager = config.NewTestStoreConfigManager(nil) - config := cluster.storeConfigManager.GetStoreConfig() - config.Coprocessor.EnableRegionBucket = true + opt.SetRegionBucketEnabled(true) re.NoError(cluster.processRegionHeartbeat(newRegion)) re.Len(cluster.GetRegion(uint64(1)).GetBuckets().GetKeys(), 2) // case6: disable region bucket in - config.Coprocessor.EnableRegionBucket = false + opt.SetRegionBucketEnabled(false) newRegion2 := regions[1].Clone(core.WithIncConfVer(), core.SetBuckets(nil)) re.NoError(cluster.processRegionHeartbeat(newRegion2)) re.Nil(cluster.GetRegion(uint64(1)).GetBuckets()) @@ -850,6 +851,16 @@ func TestRegionHeartbeat(t *testing.T) { regions[i] = region re.NoError(cluster.processRegionHeartbeat(region)) checkRegions(re, cluster.core, regions[:i+1]) + + // Flashback + region = region.Clone(core.WithFlashback(true, 1)) + regions[i] = region + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core, regions[:i+1]) + region = region.Clone(core.WithFlashback(false, 0)) + regions[i] = region + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core, regions[:i+1]) } regionCounts := make(map[uint64]int) @@ -982,8 +993,7 @@ func TestRegionSizeChanged(t *testing.T) { cluster.regionStats = statistics.NewRegionStatistics( cluster.GetBasicCluster(), cluster.GetOpts(), - cluster.ruleManager, - cluster.storeConfigManager) + cluster.ruleManager) region := newTestRegions(1, 3, 3)[0] cluster.opt.GetMaxMergeRegionKeys() curMaxMergeSize := int64(cluster.opt.GetMaxMergeRegionSize()) @@ -1268,8 +1278,7 @@ func TestOfflineAndMerge(t *testing.T) { cluster.regionStats = statistics.NewRegionStatistics( cluster.GetBasicCluster(), cluster.GetOpts(), - cluster.ruleManager, - cluster.storeConfigManager) + cluster.ruleManager) cluster.coordinator = schedule.NewCoordinator(ctx, cluster, nil) // Put 4 stores. 
@@ -1319,7 +1328,7 @@ func TestOfflineAndMerge(t *testing.T) { } } -func TestSyncConfig(t *testing.T) { +func TestStoreConfigUpdate(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1332,38 +1341,134 @@ func TestSyncConfig(t *testing.T) { re.NoError(tc.putStoreLocked(s)) } re.Len(tc.getUpStores(), 5) - - testdata := []struct { - whiteList []string - maxRegionSize uint64 - updated bool - }{ - { - whiteList: []string{}, - maxRegionSize: uint64(144), - updated: false, - }, { - whiteList: []string{"127.0.0.1:5"}, - maxRegionSize: uint64(10), - updated: true, + // Case1: big region. + { + body := `{ "coprocessor": { + "split-region-on-table": false, + "batch-split-limit": 2, + "region-max-size": "15GiB", + "region-split-size": "10GiB", + "region-max-keys": 144000000, + "region-split-keys": 96000000, + "consistency-check-method": "mvcc", + "perf-level": 2 + }}` + var config sc.StoreConfig + re.NoError(json.Unmarshal([]byte(body), &config)) + tc.updateStoreConfig(opt.GetStoreConfig(), &config) + re.Equal(uint64(144000000), opt.GetRegionMaxKeys()) + re.Equal(uint64(96000000), opt.GetRegionSplitKeys()) + re.Equal(uint64(15*units.GiB/units.MiB), opt.GetRegionMaxSize()) + re.Equal(uint64(10*units.GiB/units.MiB), opt.GetRegionSplitSize()) + } + // Case2: empty config. + { + body := `{}` + var config sc.StoreConfig + re.NoError(json.Unmarshal([]byte(body), &config)) + tc.updateStoreConfig(opt.GetStoreConfig(), &config) + re.Equal(uint64(1440000), opt.GetRegionMaxKeys()) + re.Equal(uint64(960000), opt.GetRegionSplitKeys()) + re.Equal(uint64(144), opt.GetRegionMaxSize()) + re.Equal(uint64(96), opt.GetRegionSplitSize()) + } + // Case3: raft-kv2 config. + { + body := `{ "coprocessor": { + "split-region-on-table":false, + "batch-split-limit":10, + "region-max-size":"384MiB", + "region-split-size":"256MiB", + "region-max-keys":3840000, + "region-split-keys":2560000, + "consistency-check-method":"mvcc", + "enable-region-bucket":true, + "region-bucket-size":"96MiB", + "region-size-threshold-for-approximate":"384MiB", + "region-bucket-merge-size-ratio":0.33 }, + "storage":{ + "engine":"raft-kv2" + }}` + var config sc.StoreConfig + re.NoError(json.Unmarshal([]byte(body), &config)) + tc.updateStoreConfig(opt.GetStoreConfig(), &config) + re.Equal(uint64(96), opt.GetRegionBucketSize()) + re.True(opt.IsRaftKV2()) } +} + +func TestSyncConfigContext(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, opt, err := newTestScheduleConfig() + re.NoError(err) + tc := newTestCluster(ctx, opt) + tc.httpClient = &http.Client{} - for _, v := range testdata { - tc.storeConfigManager = config.NewTestStoreConfigManager(v.whiteList) - re.Equal(uint64(144), tc.GetStoreConfig().GetRegionMaxSize()) - success, switchRaftV2 := syncConfig(tc.storeConfigManager, tc.GetStores()) - re.Equal(v.updated, success) - if v.updated { - re.True(switchRaftV2) - tc.opt.UseRaftV2() - re.EqualValues(512, tc.opt.GetMaxMovableHotPeerSize()) - success, switchRaftV2 = syncConfig(tc.storeConfigManager, tc.GetStores()) - re.True(success) - re.False(switchRaftV2) + server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + time.Sleep(time.Second * 100) + cfg := &sc.StoreConfig{} + b, err := json.Marshal(cfg) + if err != nil { + res.WriteHeader(http.StatusInternalServerError) + res.Write([]byte(fmt.Sprintf("failed setting up test server: %s", err))) + return } - re.Equal(v.maxRegionSize, 
tc.GetStoreConfig().GetRegionMaxSize()) + + res.WriteHeader(http.StatusOK) + res.Write(b) + })) + stores := newTestStores(1, "2.0.0") + for _, s := range stores { + re.NoError(tc.putStoreLocked(s)) + } + // trip schema header + now := time.Now() + stores[0].GetMeta().StatusAddress = server.URL[7:] + synced, _ := tc.syncStoreConfig(tc.GetStores()) + re.False(synced) + re.Less(time.Since(now), clientTimeout*2) +} + +func TestStoreConfigSync(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, opt, err := newTestScheduleConfig() + re.NoError(err) + tc := newTestCluster(ctx, opt) + stores := newTestStores(5, "2.0.0") + for _, s := range stores { + re.NoError(tc.putStoreLocked(s)) } + re.Len(tc.getUpStores(), 5) + + re.Equal(uint64(144), tc.GetStoreConfig().GetRegionMaxSize()) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/mockFetchStoreConfigFromTiKV", `return("10MiB")`)) + // switchRaftV2 will be true. + synced, switchRaftV2 := tc.syncStoreConfig(tc.GetStores()) + re.True(synced) + re.True(switchRaftV2) + re.EqualValues(512, tc.opt.GetMaxMovableHotPeerSize()) + re.Equal(uint64(10), tc.GetStoreConfig().GetRegionMaxSize()) + // switchRaftV2 will be false this time. + synced, switchRaftV2 = tc.syncStoreConfig(tc.GetStores()) + re.True(synced) + re.False(switchRaftV2) + re.Equal(uint64(10), tc.GetStoreConfig().GetRegionMaxSize()) + re.NoError(opt.Persist(tc.GetStorage())) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/mockFetchStoreConfigFromTiKV")) + + // Check the persistence of the store config. + opt = config.NewPersistOptions(&config.Config{}) + re.Empty(opt.GetStoreConfig()) + err = opt.Reload(tc.GetStorage()) + re.NoError(err) + re.Equal(tc.GetOpts().(*config.PersistOptions).GetStoreConfig(), opt.GetStoreConfig()) } func TestUpdateStorePendingPeerCount(t *testing.T) { @@ -1526,8 +1631,7 @@ func TestCalculateStoreSize1(t *testing.T) { cluster.regionStats = statistics.NewRegionStatistics( cluster.GetBasicCluster(), cluster.GetOpts(), - cluster.ruleManager, - cluster.storeConfigManager) + cluster.ruleManager) // Put 10 stores. for i, store := range newTestStores(10, "6.0.0") { @@ -1613,8 +1717,7 @@ func TestCalculateStoreSize2(t *testing.T) { cluster.regionStats = statistics.NewRegionStatistics( cluster.GetBasicCluster(), cluster.GetOpts(), - cluster.ruleManager, - cluster.storeConfigManager) + cluster.ruleManager) // Put 10 stores. 
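`TestSyncConfigContext` above pins down the behaviour when a store's status endpoint hangs. A self-contained variant of that pattern, showing how an httptest server plus a request-scoped timeout keeps the test from blocking (names and durations are illustrative):

```go
package sketch

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"
)

func TestSlowConfigEndpoint(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case <-time.After(10 * time.Second): // simulate an unresponsive store
		case <-r.Context().Done(): // released once the client gives up
		}
	}))
	defer srv.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, srv.URL+"/config", nil)
	if err != nil {
		t.Fatal(err)
	}
	start := time.Now()
	if _, err := http.DefaultClient.Do(req); err == nil {
		t.Fatal("expected the request to fail with a timeout")
	}
	if elapsed := time.Since(start); elapsed > 5*time.Second {
		t.Fatalf("request took too long: %v", elapsed)
	}
}
```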
for i, store := range newTestStores(10, "6.0.0") { @@ -2371,8 +2474,7 @@ func TestCollectMetricsConcurrent(t *testing.T) { tc.regionStats = statistics.NewRegionStatistics( tc.GetBasicCluster(), tc.GetOpts(), - nil, - tc.storeConfigManager) + nil) }, func(co *schedule.Coordinator) { co.Run() }, re) defer cleanup() @@ -2407,8 +2509,7 @@ func TestCollectMetrics(t *testing.T) { tc.regionStats = statistics.NewRegionStatistics( tc.GetBasicCluster(), tc.GetOpts(), - nil, - tc.storeConfigManager) + nil) }, func(co *schedule.Coordinator) { co.Run() }, re) defer cleanup() count := 10 @@ -2419,9 +2520,9 @@ func TestCollectMetrics(t *testing.T) { RegionID: uint64(i*1000 + k), Loads: []float64{10, 20, 30}, HotDegree: 10, - AntiCount: statistics.HotRegionAntiCount, // for write + AntiCount: utils.HotRegionAntiCount, // for write } - tc.hotStat.HotCache.Update(item, statistics.Write) + tc.hotStat.HotCache.Update(item, utils.Write) } } controller := co.GetSchedulersController() @@ -2433,7 +2534,7 @@ func TestCollectMetrics(t *testing.T) { stores := co.GetCluster().GetStores() regionStats := co.GetCluster().RegionWriteStats() status1 := statistics.CollectHotPeerInfos(stores, regionStats) - status2 := statistics.GetHotStatus(stores, co.GetCluster().GetStoresLoads(), regionStats, statistics.Write, co.GetCluster().GetSchedulerConfig().IsTraceRegionFlow()) + status2 := statistics.GetHotStatus(stores, co.GetCluster().GetStoresLoads(), regionStats, utils.Write, co.GetCluster().GetSchedulerConfig().IsTraceRegionFlow()) for _, s := range status2.AsLeader { s.Stats = nil } @@ -2998,7 +3099,7 @@ func TestPersistScheduler(t *testing.T) { re.NoError(controller.RemoveScheduler(schedulers.BalanceWitnessName)) re.NoError(controller.RemoveScheduler(schedulers.TransferWitnessLeaderName)) re.Len(controller.GetSchedulerNames(), defaultCount-3) - re.NoError(co.GetCluster().GetPersistOptions().Persist(storage)) + re.NoError(co.GetCluster().GetSchedulerConfig().Persist(storage)) co.Stop() co.GetSchedulersController().Wait() co.GetWaitGroup().Wait() @@ -3051,12 +3152,12 @@ func TestPersistScheduler(t *testing.T) { // the scheduler option should contain 6 items // the `hot scheduler` are disabled - re.Len(co.GetCluster().GetPersistOptions().GetSchedulers(), defaultCount+3) + re.Len(co.GetCluster().GetSchedulerConfig().(*config.PersistOptions).GetSchedulers(), defaultCount+3) re.NoError(controller.RemoveScheduler(schedulers.GrantLeaderName)) // the scheduler that is not enable by default will be completely deleted - re.Len(co.GetCluster().GetPersistOptions().GetSchedulers(), defaultCount+2) + re.Len(co.GetCluster().GetSchedulerConfig().(*config.PersistOptions).GetSchedulers(), defaultCount+2) re.Len(controller.GetSchedulerNames(), 4) - re.NoError(co.GetCluster().GetPersistOptions().Persist(co.GetCluster().GetStorage())) + re.NoError(co.GetCluster().GetSchedulerConfig().Persist(co.GetCluster().GetStorage())) co.Stop() co.GetSchedulersController().Wait() co.GetWaitGroup().Wait() @@ -3113,7 +3214,7 @@ func TestRemoveScheduler(t *testing.T) { re.NoError(err) re.Empty(sches) re.Empty(controller.GetSchedulerNames()) - re.NoError(co.GetCluster().GetPersistOptions().Persist(co.GetCluster().GetStorage())) + re.NoError(co.GetCluster().GetSchedulerConfig().Persist(co.GetCluster().GetStorage())) co.Stop() co.GetSchedulersController().Wait() co.GetWaitGroup().Wait() @@ -3127,7 +3228,7 @@ func TestRemoveScheduler(t *testing.T) { co.Run() re.Empty(controller.GetSchedulerNames()) // the option remains default scheduler - 
re.Len(co.GetCluster().GetPersistOptions().GetSchedulers(), defaultCount) + re.Len(co.GetCluster().GetSchedulerConfig().(*config.PersistOptions).GetSchedulers(), defaultCount) co.Stop() co.GetSchedulersController().Wait() co.GetWaitGroup().Wait() diff --git a/server/cluster/cluster_worker.go b/server/cluster/cluster_worker.go index 51781bde7f6..82113bc1656 100644 --- a/server/cluster/cluster_worker.go +++ b/server/cluster/cluster_worker.go @@ -250,6 +250,6 @@ func (c *RaftCluster) HandleReportBuckets(b *metapb.Buckets) error { if err := c.processReportBuckets(b); err != nil { return err } - c.hotBuckets.CheckAsync(buckets.NewCheckPeerTask(b)) + c.hotStat.CheckAsync(buckets.NewCheckPeerTask(b)) return nil } diff --git a/server/cluster/store_limiter.go b/server/cluster/store_limiter.go index 4e77590ddc5..b40bcef2eed 100644 --- a/server/cluster/store_limiter.go +++ b/server/cluster/store_limiter.go @@ -16,21 +16,21 @@ package cluster import ( "github.com/tikv/pd/pkg/core/storelimit" + sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/utils/syncutil" - "github.com/tikv/pd/server/config" ) // StoreLimiter adjust the store limit dynamically type StoreLimiter struct { m syncutil.RWMutex - opt *config.PersistOptions + opt sc.ConfProvider scene map[storelimit.Type]*storelimit.Scene state *State current LoadState } // NewStoreLimiter builds a store limiter object using the operator controller -func NewStoreLimiter(opt *config.PersistOptions) *StoreLimiter { +func NewStoreLimiter(opt sc.ConfProvider) *StoreLimiter { defaultScene := map[storelimit.Type]*storelimit.Scene{ storelimit.AddPeer: storelimit.DefaultScene(storelimit.AddPeer), storelimit.RemovePeer: storelimit.DefaultScene(storelimit.RemovePeer), diff --git a/server/config/persist_options.go b/server/config/persist_options.go index 228351775e4..1ea0b79424f 100644 --- a/server/config/persist_options.go +++ b/server/config/persist_options.go @@ -52,6 +52,7 @@ type PersistOptions struct { replicationMode atomic.Value labelProperty atomic.Value keyspace atomic.Value + storeConfig atomic.Value clusterVersion unsafe.Pointer } @@ -64,6 +65,9 @@ func NewPersistOptions(cfg *Config) *PersistOptions { o.replicationMode.Store(&cfg.ReplicationMode) o.labelProperty.Store(cfg.LabelProperty) o.keyspace.Store(&cfg.Keyspace) + // storeConfig will be fetched from TiKV later, + // set it to an empty config here first. + o.storeConfig.Store(&sc.StoreConfig{}) o.SetClusterVersion(&cfg.ClusterVersion) o.ttl = nil return o @@ -129,6 +133,16 @@ func (o *PersistOptions) SetKeyspaceConfig(cfg *KeyspaceConfig) { o.keyspace.Store(cfg) } +// GetStoreConfig returns the store config. +func (o *PersistOptions) GetStoreConfig() *sc.StoreConfig { + return o.storeConfig.Load().(*sc.StoreConfig) +} + +// SetStoreConfig sets the store configuration. +func (o *PersistOptions) SetStoreConfig(cfg *sc.StoreConfig) { + o.storeConfig.Store(cfg) +} + // GetClusterVersion returns the cluster version. func (o *PersistOptions) GetClusterVersion() *semver.Version { return (*semver.Version)(atomic.LoadPointer(&o.clusterVersion)) @@ -202,9 +216,6 @@ func (o *PersistOptions) SetMaxReplicas(replicas int) { o.SetReplicationConfig(v) } -// UseRaftV2 set some config for raft store v2 by default temporary. 
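The new `storeConfig` field follows the same `atomic.Value` pattern as the other sections of `PersistOptions`: readers load a complete snapshot, writers swap it wholesale, and the read path needs no mutex. A reduced sketch of that idea (types are illustrative):

```go
package sketch

import "sync/atomic"

// StoreConfig is a stand-in for the real store config type.
type StoreConfig struct {
	RegionMaxSize string
}

type options struct {
	storeConfig atomic.Value // always holds a *StoreConfig
}

func newOptions() *options {
	o := &options{}
	o.storeConfig.Store(&StoreConfig{}) // never leave the value empty
	return o
}

func (o *options) GetStoreConfig() *StoreConfig  { return o.storeConfig.Load().(*StoreConfig) }
func (o *options) SetStoreConfig(c *StoreConfig) { o.storeConfig.Store(c) }
```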
-func (o *PersistOptions) UseRaftV2() {} - const ( maxSnapshotCountKey = "schedule.max-snapshot-count" maxMergeRegionSizeKey = "schedule.max-merge-region-size" @@ -602,16 +613,16 @@ func (o *PersistOptions) IsRemoveExtraReplicaEnabled() bool { return o.GetScheduleConfig().EnableRemoveExtraReplica } -// IsTikvRegionSplitEnabled returns whether tikv split region is disabled. -func (o *PersistOptions) IsTikvRegionSplitEnabled() bool { - return o.getTTLBoolOr(enableTiKVSplitRegion, o.GetScheduleConfig().EnableTiKVSplitRegion) -} - // IsLocationReplacementEnabled returns if location replace is enabled. func (o *PersistOptions) IsLocationReplacementEnabled() bool { return o.getTTLBoolOr(enableLocationReplacement, o.GetScheduleConfig().EnableLocationReplacement) } +// IsTikvRegionSplitEnabled returns whether tikv split region is disabled. +func (o *PersistOptions) IsTikvRegionSplitEnabled() bool { + return o.getTTLBoolOr(enableTiKVSplitRegion, o.GetScheduleConfig().EnableTiKVSplitRegion) +} + // GetMaxMovableHotPeerSize returns the max movable hot peer size. func (o *PersistOptions) GetMaxMovableHotPeerSize() int64 { return o.GetScheduleConfig().MaxMovableHotPeerSize @@ -744,16 +755,26 @@ func (o *PersistOptions) DeleteLabelProperty(typ, labelKey, labelValue string) { o.labelProperty.Store(cfg) } +// persistedConfig is used to merge all configs into one before saving to storage. +type persistedConfig struct { + *Config + // StoreConfig is injected into Config to avoid breaking the original API. + StoreConfig sc.StoreConfig `json:"store"` +} + // Persist saves the configuration to the storage. func (o *PersistOptions) Persist(storage endpoint.ConfigStorage) error { - cfg := &Config{ - Schedule: *o.GetScheduleConfig(), - Replication: *o.GetReplicationConfig(), - PDServerCfg: *o.GetPDServerConfig(), - ReplicationMode: *o.GetReplicationModeConfig(), - LabelProperty: o.GetLabelPropertyConfig(), - Keyspace: *o.GetKeyspaceConfig(), - ClusterVersion: *o.GetClusterVersion(), + cfg := &persistedConfig{ + Config: &Config{ + Schedule: *o.GetScheduleConfig(), + Replication: *o.GetReplicationConfig(), + PDServerCfg: *o.GetPDServerConfig(), + ReplicationMode: *o.GetReplicationModeConfig(), + LabelProperty: o.GetLabelPropertyConfig(), + Keyspace: *o.GetKeyspaceConfig(), + ClusterVersion: *o.GetClusterVersion(), + }, + StoreConfig: *o.GetStoreConfig(), } err := storage.SaveConfig(cfg) failpoint.Inject("persistFail", func() { @@ -764,8 +785,8 @@ func (o *PersistOptions) Persist(storage endpoint.ConfigStorage) error { // Reload reloads the configuration from the storage. func (o *PersistOptions) Reload(storage endpoint.ConfigStorage) error { - cfg := &Config{} - // pass nil to initialize cfg to default values (all items undefined) + cfg := &persistedConfig{Config: &Config{}} + // Pass nil to initialize cfg to default values (all items undefined) cfg.Adjust(nil, true) isExist, err := storage.LoadConfig(cfg) @@ -773,6 +794,8 @@ func (o *PersistOptions) Reload(storage endpoint.ConfigStorage) error { return err } o.adjustScheduleCfg(&cfg.Schedule) + // Some fields may not be stored in the storage, we need to calculate them manually. 
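`persistedConfig` above piggybacks the store section onto the existing config document by embedding `*Config`, so the persisted JSON keeps its old top-level fields and merely gains a `store` object. A toy version of that marshalling behaviour:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	Schedule string `json:"schedule"`
}

type StoreConfig struct {
	Engine string `json:"engine"`
}

// persistedConfig flattens Config's fields into the same JSON object and adds
// the store section alongside them.
type persistedConfig struct {
	*Config
	StoreConfig StoreConfig `json:"store"`
}

func main() {
	b, _ := json.Marshal(&persistedConfig{
		Config:      &Config{Schedule: "balance"},
		StoreConfig: StoreConfig{Engine: "raft-kv2"},
	})
	fmt.Println(string(b)) // {"schedule":"balance","store":{"engine":"raft-kv2"}}
}
```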
+ cfg.StoreConfig.Adjust() cfg.PDServerCfg.MigrateDeprecatedFlags() if isExist { o.schedule.Store(&cfg.Schedule) @@ -781,6 +804,7 @@ func (o *PersistOptions) Reload(storage endpoint.ConfigStorage) error { o.replicationMode.Store(&cfg.ReplicationMode) o.labelProperty.Store(cfg.LabelProperty) o.keyspace.Store(&cfg.Keyspace) + o.storeConfig.Store(&cfg.StoreConfig) o.SetClusterVersion(&cfg.ClusterVersion) } return nil @@ -957,3 +981,55 @@ func (o *PersistOptions) IsSchedulingHalted() bool { } return o.GetScheduleConfig().HaltScheduling } + +// GetRegionMaxSize returns the max region size in MB +func (o *PersistOptions) GetRegionMaxSize() uint64 { + return o.GetStoreConfig().GetRegionMaxSize() +} + +// GetRegionMaxKeys returns the region split keys +func (o *PersistOptions) GetRegionMaxKeys() uint64 { + return o.GetStoreConfig().GetRegionMaxKeys() +} + +// GetRegionSplitSize returns the region split size in MB +func (o *PersistOptions) GetRegionSplitSize() uint64 { + return o.GetStoreConfig().GetRegionSplitSize() +} + +// GetRegionSplitKeys returns the region split keys +func (o *PersistOptions) GetRegionSplitKeys() uint64 { + return o.GetStoreConfig().GetRegionSplitKeys() +} + +// CheckRegionSize return error if the smallest region's size is less than mergeSize +func (o *PersistOptions) CheckRegionSize(size, mergeSize uint64) error { + return o.GetStoreConfig().CheckRegionSize(size, mergeSize) +} + +// CheckRegionKeys return error if the smallest region's keys is less than mergeKeys +func (o *PersistOptions) CheckRegionKeys(keys, mergeKeys uint64) error { + return o.GetStoreConfig().CheckRegionKeys(keys, mergeKeys) +} + +// IsEnableRegionBucket return true if the region bucket is enabled. +func (o *PersistOptions) IsEnableRegionBucket() bool { + return o.GetStoreConfig().IsEnableRegionBucket() +} + +// IsRaftKV2 returns true if the raft kv is v2. +func (o *PersistOptions) IsRaftKV2() bool { + return o.GetStoreConfig().IsRaftKV2() +} + +// SetRegionBucketEnabled sets if the region bucket is enabled. +func (o *PersistOptions) SetRegionBucketEnabled(enabled bool) { + cfg := o.GetStoreConfig().Clone() + cfg.SetRegionBucketEnabled(enabled) + o.SetStoreConfig(cfg) +} + +// GetRegionBucketSize returns the region bucket size. +func (o *PersistOptions) GetRegionBucketSize() uint64 { + return o.GetStoreConfig().GetRegionBucketSize() +} diff --git a/server/config/store_config_test.go b/server/config/store_config_test.go deleted file mode 100644 index 7cc9119f4d5..00000000000 --- a/server/config/store_config_test.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2022 TiKV Project Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "crypto/tls" - "encoding/json" - "net/http" - "testing" - - "github.com/docker/go-units" - "github.com/stretchr/testify/require" -) - -func TestTiKVConfig(t *testing.T) { - re := require.New(t) - m := NewStoreConfigManager(nil) - // case1: big region. 
- { - body := `{ "coprocessor": { - "split-region-on-table": false, - "batch-split-limit": 2, - "region-max-size": "15GiB", - "region-split-size": "10GiB", - "region-max-keys": 144000000, - "region-split-keys": 96000000, - "consistency-check-method": "mvcc", - "perf-level": 2 - }}` - var config StoreConfig - re.NoError(json.Unmarshal([]byte(body), &config)) - m.update(&config) - re.Equal(uint64(144000000), config.GetRegionMaxKeys()) - re.Equal(uint64(96000000), config.GetRegionSplitKeys()) - re.Equal(15*units.GiB/units.MiB, int(config.GetRegionMaxSize())) - re.Equal(uint64(10*units.GiB/units.MiB), config.GetRegionSplitSize()) - } - //case2: empty config. - { - body := `{}` - var config StoreConfig - re.NoError(json.Unmarshal([]byte(body), &config)) - - re.Equal(uint64(1440000), config.GetRegionMaxKeys()) - re.Equal(uint64(960000), config.GetRegionSplitKeys()) - re.Equal(144, int(config.GetRegionMaxSize())) - re.Equal(uint64(96), config.GetRegionSplitSize()) - } -} - -func TestUpdateConfig(t *testing.T) { - re := require.New(t) - manager := NewTestStoreConfigManager([]string{"tidb.com"}) - manager.ObserveConfig("tikv.com") - re.Equal(uint64(144), manager.GetStoreConfig().GetRegionMaxSize()) - re.NotEqual(raftStoreV2, manager.GetStoreConfig().GetRegionMaxSize()) - manager.ObserveConfig("tidb.com") - re.Equal(uint64(10), manager.GetStoreConfig().GetRegionMaxSize()) - re.Equal(raftStoreV2, manager.GetStoreConfig().Engine) - - // case2: the config should not update if config is same expect some ignore field. - c, err := manager.source.GetConfig("tidb.com") - re.NoError(err) - re.True(manager.GetStoreConfig().Equal(c)) - - client := &http.Client{ - Transport: &http.Transport{ - DisableKeepAlives: true, - TLSClientConfig: &tls.Config{}, - }, - } - manager = NewStoreConfigManager(client) - re.Equal("http", manager.source.(*TiKVConfigSource).schema) -} - -func TestParseConfig(t *testing.T) { - re := require.New(t) - m := NewStoreConfigManager(nil) - body := ` -{ -"coprocessor":{ -"split-region-on-table":false, -"batch-split-limit":10, -"region-max-size":"384MiB", -"region-split-size":"256MiB", -"region-max-keys":3840000, -"region-split-keys":2560000, -"consistency-check-method":"mvcc", -"enable-region-bucket":true, -"region-bucket-size":"96MiB", -"region-size-threshold-for-approximate":"384MiB", -"region-bucket-merge-size-ratio":0.33 -}, -"storage":{ - "engine":"raft-kv2" -} -} -` - - var config StoreConfig - re.NoError(json.Unmarshal([]byte(body), &config)) - m.update(&config) - re.Equal(uint64(96), config.GetRegionBucketSize()) - re.True(config.IsRaftKV2()) - re.Equal(raftStoreV2, config.Storage.Engine) -} - -func TestMergeCheck(t *testing.T) { - re := require.New(t) - testdata := []struct { - size uint64 - mergeSize uint64 - keys uint64 - mergeKeys uint64 - pass bool - }{{ - // case 1: the merged region size is smaller than the max region size - size: 96 + 20, - mergeSize: 20, - keys: 1440000 + 200000, - mergeKeys: 200000, - pass: true, - }, { - // case 2: the smallest region is 68MiB, it can't be merged again. - size: 144 + 20, - mergeSize: 20, - keys: 1440000 + 200000, - mergeKeys: 200000, - pass: true, - }, { - // case 3: the smallest region is 50MiB, it can be merged again. - size: 144 + 2, - mergeSize: 50, - keys: 1440000 + 20000, - mergeKeys: 500000, - pass: false, - }, { - // case4: the smallest region is 51MiB, it can't be merged again. 
- size: 144 + 3, - mergeSize: 50, - keys: 1440000 + 30000, - mergeKeys: 500000, - pass: true, - }} - config := &StoreConfig{} - for _, v := range testdata { - if v.pass { - re.NoError(config.CheckRegionSize(v.size, v.mergeSize)) - re.NoError(config.CheckRegionKeys(v.keys, v.mergeKeys)) - } else { - re.Error(config.CheckRegionSize(v.size, v.mergeSize)) - re.Error(config.CheckRegionKeys(v.keys, v.mergeKeys)) - } - } -} diff --git a/server/gc_service.go b/server/gc_service.go index fcb0550c15f..d8a0158920d 100644 --- a/server/gc_service.go +++ b/server/gc_service.go @@ -114,7 +114,7 @@ func (s *GrpcServer) UpdateServiceSafePointV2(ctx context.Context, request *pdpb if s.IsAPIServiceMode() { nowTSO, err = s.getGlobalTSOFromTSOServer(ctx) } else { - nowTSO, err = s.tsoAllocatorManager.HandleRequest(tso.GlobalDCLocation, 1) + nowTSO, err = s.tsoAllocatorManager.HandleRequest(ctx, tso.GlobalDCLocation, 1) } if err != nil { return nil, err diff --git a/server/grpc_service.go b/server/grpc_service.go index 62a8f8005ce..4bc63224401 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -19,6 +19,7 @@ import ( "fmt" "io" "path" + "runtime/trace" "strconv" "strings" "sync" @@ -148,7 +149,7 @@ func (s *GrpcServer) GetMinTS( minTS, err = s.GetMinTSFromTSOService(tso.GlobalDCLocation) } else { start := time.Now() - ts, internalErr := s.tsoAllocatorManager.HandleRequest(tso.GlobalDCLocation, 1) + ts, internalErr := s.tsoAllocatorManager.HandleRequest(ctx, tso.GlobalDCLocation, 1) if internalErr == nil { tsoHandleDuration.Observe(time.Since(start).Seconds()) } @@ -386,7 +387,9 @@ func (s *GrpcServer) Tso(stream pdpb.PD_TsoServer) error { "mismatch cluster id, need %d but got %d", s.clusterID, request.GetHeader().GetClusterId()) } count := request.GetCount() - ts, err := s.tsoAllocatorManager.HandleRequest(request.GetDcLocation(), count) + ctx, task := trace.NewTask(ctx, "tso") + ts, err := s.tsoAllocatorManager.HandleRequest(ctx, request.GetDcLocation(), count) + task.End() if err != nil { return status.Errorf(codes.Unknown, err.Error()) } @@ -1732,7 +1735,7 @@ func (s *GrpcServer) UpdateServiceGCSafePoint(ctx context.Context, request *pdpb if s.IsAPIServiceMode() { nowTSO, err = s.getGlobalTSOFromTSOServer(ctx) } else { - nowTSO, err = s.tsoAllocatorManager.HandleRequest(tso.GlobalDCLocation, 1) + nowTSO, err = s.tsoAllocatorManager.HandleRequest(ctx, tso.GlobalDCLocation, 1) } if err != nil { return nil, err @@ -2493,7 +2496,7 @@ func (s *GrpcServer) SetExternalTimestamp(ctx context.Context, request *pdpb.Set if s.IsAPIServiceMode() { nowTSO, err = s.getGlobalTSOFromTSOServer(ctx) } else { - nowTSO, err = s.tsoAllocatorManager.HandleRequest(tso.GlobalDCLocation, 1) + nowTSO, err = s.tsoAllocatorManager.HandleRequest(ctx, tso.GlobalDCLocation, 1) } if err != nil { return nil, err diff --git a/server/server.go b/server/server.go index a8ff28ef98c..1fdcd8497f0 100644 --- a/server/server.go +++ b/server/server.go @@ -855,6 +855,12 @@ func (s *Server) GetKeyspaceManager() *keyspace.Manager { return s.keyspaceManager } +// SetKeyspaceManager sets the keyspace manager of server. +// Note: it is only used for test. +func (s *Server) SetKeyspaceManager(keyspaceManager *keyspace.Manager) { + s.keyspaceManager = keyspaceManager +} + // GetSafePointV2Manager returns the safe point v2 manager of server. 
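The grpc_service.go hunks above thread the request context into HandleRequest and wrap each Tso call in a runtime/trace task, so go tool trace can group the per-request work. A minimal, self-contained sketch of that pattern follows; handleRequest and its signature are hypothetical stand-ins for the allocator call, not PD's API.

package main

import (
	"context"
	"fmt"
	"runtime/trace"
)

// handleRequest stands in for tsoAllocatorManager.HandleRequest; the name,
// signature, and return values here are illustrative only.
func handleRequest(ctx context.Context, dcLocation string, count uint32) (uint64, error) {
	_ = ctx // with ctx threaded through, cancellation and trace regions reach the allocator
	return uint64(count), nil
}

func main() {
	// Group all work done for one request under the "tso" task type.
	ctx, task := trace.NewTask(context.Background(), "tso")
	ts, err := handleRequest(ctx, "global", 1)
	task.End()
	fmt.Println(ts, err)
}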
func (s *Server) GetSafePointV2Manager() *gc.SafePointV2Manager { return s.safePointV2Manager @@ -1712,6 +1718,9 @@ func (s *Server) reloadConfigFromKV() error { } func (s *Server) loadKeyspaceConfig() { + if s.keyspaceManager == nil { + return + } cfg := s.persistOptions.GetKeyspaceConfig() s.keyspaceManager.UpdateConfig(cfg) } diff --git a/tests/cluster.go b/tests/cluster.go index 28506858f0c..607955cc6a9 100644 --- a/tests/cluster.go +++ b/tests/cluster.go @@ -244,6 +244,13 @@ func (s *TestServer) GetKeyspaceManager() *keyspace.Manager { return s.server.GetKeyspaceManager() } +// SetKeyspaceManager sets the current TestServer's Keyspace Manager. +func (s *TestServer) SetKeyspaceManager(km *keyspace.Manager) { + s.RLock() + defer s.RUnlock() + s.server.SetKeyspaceManager(km) +} + // GetCluster returns PD cluster. func (s *TestServer) GetCluster() *metapb.Cluster { s.RLock() diff --git a/tests/integrations/client/cert-expired/ca-config.json b/tests/integrations/client/cert-expired/ca-config.json deleted file mode 100644 index 30cc837100e..00000000000 --- a/tests/integrations/client/cert-expired/ca-config.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "signing": { - "default": { - "expiry": "7m" - }, - "profiles": { - "server": { - "expiry": "7m", - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ] - }, - "client": { - "expiry": "7m", - "usages": [ - "signing", - "key encipherment", - "client auth" - ] - } - } - } -} - diff --git a/tests/integrations/client/cert-expired/ca-csr.json b/tests/integrations/client/cert-expired/ca-csr.json deleted file mode 100644 index 8be8f743fe2..00000000000 --- a/tests/integrations/client/cert-expired/ca-csr.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "CN": "My own CA", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "C": "CN", - "L": "Beijing", - "O": "Pingcap", - "ST": "Beijing" - } - ], - "ca":{ - "expiry": "87600h" - } -} diff --git a/tests/integrations/client/cert-expired/ca-key.pem b/tests/integrations/client/cert-expired/ca-key.pem deleted file mode 100644 index 2d8258dbcdc..00000000000 --- a/tests/integrations/client/cert-expired/ca-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA3wh9AXig75uVYTj27ZuT+GbcPYN1ue0iH5MK9kqeiDUFOAJM -7BB+j3+RWbbTv/CAO4Gdhe8wIhNa67uoOqfNr5A4FcqgOboO+H59PC1yDNISCoGO -4fr3DSVTT5jWQ03R26e15/lxUGZDkf43NIOlo5PlHYYo7f7pCHJaYQ9U3ZnF393k -TMyoB0MpBflupbMPYLdn++0ND0W2MpWhmFTih1FL5JJ6/PlKqzVHFFNqGIyUGHC9 -XnX9jk3v86NJ0WkTBj6mOim6lmQOga1CAsD2O15fLI8S8py2jn0SWT6NDgh6S09Y -Sj5LRZkOwLkBpgBgg/MpchBeN/TmBX5jKNlYpwIDAQABAoIBAQCPEonmPLS4oEy2 -Pm2gxPGqIonb6A3IRIdkS9Z3YizKYYDEii1ALeCSOxpJu8+gTC5mfeJH/cUZxuLH -X+0uG2EF43Um/YHFJkbeP34k8V49PTEXjj7TVkPPKgeEgx3HtQ8PYkl90vVOxxtH -dxWA2YaTJZePVfXBnolswraZltUxBXYyrTJZafMrx26Ik6vdjpiS4vP+FejaS/Mq -+JbIUL3iQzYKrQ+EsBG6qjJUZ1gI1q+L+h5bBZHyTUblV/erZ1zlxvkZ1MPGDJ0Z -blR5TDQrQ6Mol5neAUV5GxG8ZdktJMLYFel+g3pmx5ulklza+PIDFOTLcXpwS5ws -LyxZQZdpAoGBAOYwQvXg7FYUjE0fhCjZQFgMYMSkl0LigOOlm8xA+gT20R7MoVYB -LvIG6AfC13t7BZ5Vi/yrbELDnkhc+H8FlKHj3JwapPzH9L94IXAELf+PigsKsJos -p3dpKh4SQ9+SjaW0mLhmYFO15bta6blzIdUgQoLjU//2K+fszPTb+LglAoGBAPgK -05YB2P9Jn4ROQURzlZB6EIIl7urnUMg0mO4KPKfA2u3bXXQb/8uocvI5ZAj3/D/o -FslTuQW4pfciovFn+jfTvZMVr2SsJMTPX154ekTFBzCfzZ0ihT/crnotkKW7EIkp -XQLQbiGvfI5aZ4YhgP0ZyOxc+PH48dfA4JVGdz3bAoGBAKIt0p+lzx1+8LLNx7F4 -D4t5fRxO0nu/VgwN/EzWYtDojMHkbq9Huimvj/8X2fYX4QeDQlPM+0O2y2g0iKgF -6Ih/IEmjxCaNQvU70GM5rqbmHN5Ws8KMP5k0MQZq3ANDICVlrkwNZUTVXXy5Ov78 -DRQ53GKXg/FNIfYPsv+5k+05AoGAFVmRNsM381lZ8qBtu7+bKxFmpF0xgGSirmjg 
-lPSqneHatkiAdcMHNHduVW0dMCxwOOv4MiITtetb1bbUgaTqg62lDqj6LNcoXwxe -cBo8o/i1krjekNzszT9ogTm0zp8YYEYALILWR+378aDUclYl7SMwCTBDeUhtQJ3o -dtDW6KsCgYEAhgUxBk8VZnqnZVweB34ITfTDfCCR6W8uyhZhP1b51CRx1wARw08p -G7fEKFdagRH02GHlynJoglmUhjaIireT0or7ONW0oQ8IEU8gmeQe5UP4IwSJ0rH8 -7z+IJEWDwpUTr/1mWGkJDZS0vJZVF6V46tm6aowyDs80W5rX/uwTV1k= ------END RSA PRIVATE KEY----- diff --git a/tests/integrations/client/cert-expired/ca.csr b/tests/integrations/client/cert-expired/ca.csr deleted file mode 100644 index f98ea92fd8f..00000000000 --- a/tests/integrations/client/cert-expired/ca.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICnDCCAYQCAQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAO -BgNVBAcTB0JlaWppbmcxEDAOBgNVBAoTB1BpbmdjYXAxEjAQBgNVBAMTCU15IG93 -biBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN8IfQF4oO+blWE4 -9u2bk/hm3D2DdbntIh+TCvZKnog1BTgCTOwQfo9/kVm207/wgDuBnYXvMCITWuu7 -qDqnza+QOBXKoDm6Dvh+fTwtcgzSEgqBjuH69w0lU0+Y1kNN0duntef5cVBmQ5H+ -NzSDpaOT5R2GKO3+6QhyWmEPVN2Zxd/d5EzMqAdDKQX5bqWzD2C3Z/vtDQ9FtjKV -oZhU4odRS+SSevz5Sqs1RxRTahiMlBhwvV51/Y5N7/OjSdFpEwY+pjopupZkDoGt -QgLA9jteXyyPEvKcto59Elk+jQ4IektPWEo+S0WZDsC5AaYAYIPzKXIQXjf05gV+ -YyjZWKcCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQAcAsP2Ix4UcioZi1QZoQUH -OajonLUFbbCjmVdV8cTWZpAXje1sh/cKzI5jxmlw7BF4J+m206Cb7/vFM+WtL5Qt -kDyVInnuuXsP+7VUEzdchJlarFk5KMkOz9dl3AxXyXwxi4aoMOLpcC8Ye0Emq7bt -aG17jmf3eQLnhyhdzrob9dGDWyRDHI29fI0kiPGu5wciJLW8RiOdeMdquwrkx3n3 -FUkGU1d26S/FXR4UMz6iw530JBgp1mDmU3bY5GmtFN2OpURYa3pogwtJ+bjddhSS -NrhOsDnkUdvfQkGz9VR6Vyk9c1kkmLLzfMU2iY4KQiTCPDT/Yaq5JEqG0e1Sl/Ag ------END CERTIFICATE REQUEST----- diff --git a/tests/integrations/client/cert-expired/ca.pem b/tests/integrations/client/cert-expired/ca.pem deleted file mode 100644 index 0955ceb1e5d..00000000000 --- a/tests/integrations/client/cert-expired/ca.pem +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIUPbO/J5qmPTaZBDU6gTnWq1iCUPMwDQYJKoZIhvcNAQEL -BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl -aWppbmcxEDAOBgNVBAoTB1BpbmdjYXAxEjAQBgNVBAMTCU15IG93biBDQTAeFw0y -MDAyMjcwMzUxMDBaFw0zMDAyMjQwMzUxMDBaMFcxCzAJBgNVBAYTAkNOMRAwDgYD -VQQIEwdCZWlqaW5nMRAwDgYDVQQHEwdCZWlqaW5nMRAwDgYDVQQKEwdQaW5nY2Fw -MRIwEAYDVQQDEwlNeSBvd24gQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDfCH0BeKDvm5VhOPbtm5P4Ztw9g3W57SIfkwr2Sp6INQU4AkzsEH6Pf5FZ -ttO/8IA7gZ2F7zAiE1rru6g6p82vkDgVyqA5ug74fn08LXIM0hIKgY7h+vcNJVNP -mNZDTdHbp7Xn+XFQZkOR/jc0g6Wjk+Udhijt/ukIclphD1TdmcXf3eRMzKgHQykF -+W6lsw9gt2f77Q0PRbYylaGYVOKHUUvkknr8+UqrNUcUU2oYjJQYcL1edf2OTe/z -o0nRaRMGPqY6KbqWZA6BrUICwPY7Xl8sjxLynLaOfRJZPo0OCHpLT1hKPktFmQ7A -uQGmAGCD8ylyEF439OYFfmMo2VinAgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSktUF7xrXV//2h1KeL6I1VXejz -8jAfBgNVHSMEGDAWgBSktUF7xrXV//2h1KeL6I1VXejz8jANBgkqhkiG9w0BAQsF -AAOCAQEAJRus+wyu9bJCaISJtKa4Et/tfhguW0H3V29/8yby38pO8YLwR2p8/5jz -ciL8xSyEhOp9y6YDZTyeO5pLCaJZ2H53BizM/e/wJz+J5apkFOWFyACO9dl924w1 -1d47kbXXv7TNm16fY9ja3ss85MdazeH5OKnvAMlapltylJhVd8uCCRx5GdFvFbim -UtzR1qirP+uOhF8tYbsGYS1mzsJKCXe3TENkKIsvrPWTtbEvAvDJLqXFTsFEdCF8 -JGLjg/BXawVOY/RAVfjnWMgFjP0SY8itIfbjUPYfwPzUKkKYbH3/MZzoiSkutmJW -Z/qj9gWY5UovzwzaWde/FrNiaiJGdQ== ------END CERTIFICATE----- diff --git a/tests/integrations/client/cert-expired/client-key.pem b/tests/integrations/client/cert-expired/client-key.pem deleted file mode 100644 index af8a06d6b93..00000000000 --- a/tests/integrations/client/cert-expired/client-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAo92Lb5C+BcSxfjEnYAlaJycaA8+acuElRV3VKgWzLzWpvjfU 
-6cpTwhNYEYDqx9DbLwsdmz3IE+V6MncI/jHtqmKYgJ0uTO80dcCWj8Ej5NZBmtw6 -fJkcO535caO/dzywWY/O6claTNF3mBTVTirVc7lUccmqQUfrHEZYeQNvrAlSZN5n -ToHaJgU52UpbO1rl43A9fgTD9OSAWPjCgOrJgnXiDBOUV2lNO00xTTyKhAw311C+ -ALI6juHDcpEdOgawgToyDSe73QWNz4LWJ0AQc3tvwdAReTzH8iEsjxfSeg/OvWBb -yKt/UNWFhsRd3lnKci+xutgpP2dvTEZgLCdFsQIDAQABAoIBACteWiX31+Ls8NDm -L3aiYwLTvXXevxDCd7vJHr6VbZwEBt26JxaNHufReNCBHyItK6Viur44driVVmb+ -zBmCm1Up7RS1BOkFsgusIe0AAADzl79lo/EQEMHSiI9D+EuVe1Lo1alYShbkpVNR -yDZ87atIFIpdBjtqzlXYZFf6ydZs4L/noVxqXqgkzUi4v+1QPFMO+dDcsoxFWM8R -42pCndEmXSxfSW8WkB+yeh++/3hGNmqTL8rVVI2So+JXC/3eSRgMsB+T+zzs7Ltu -fBpyAzkULT1TOk70nqXKN5mJMXVs4mtSS5Fog/pOL0s6IMh0rb/kSGsPY1pmXVyk -L7p9DiUCgYEAziWIf7oR4x6tABjPArpZzDcWoPSYxHAvf4Vq/7E3jpOnopYfGHZ5 -Lyc23EyFELi8beD69u9B4p9b8jIJH2lzhKsAz3BAeLEPLkNmJTzsBtcKQmJIp5TH -fR3S0ylFLJfI+1i03b8stE7Ybs901Gbiz97n0W5mD7H/61cuKLwbtscCgYEAy35l -fItz8usDyAYWDs9QkdbO4D12g/Q61t/SvSB2Z+/4DmPWrEibeETIty2oLpOot97Y -GMZ5fRJK81ip50dPMMLv3dle46LcYx0iO5Cxf5DowJrASV/+mpjm4XtrMz1DPdWR -Zkdsj0ABOi0zpEpagU30s/Tle12HhGKlDx69R8cCgYEAuMatxgcLd2+Mk/SowQI1 -8TVDpDv6HsGL4Zq+ILhBWxwAe1xiPPQchWZ55xbzgtyDkV3CtcoZXT+IE9xDjxNf -RwBcNpOgRk42lDo3eZNU4ICpon0kMInMEdrEmR958JGQxbJqVATC1k+B8jkjuCsM -jFR5p6Bz/QeIo/K93idWFAsCgYEAkfbDExfwfqFG4BZFgjAAyet8DurndPUds31u -60w6wokltCS3GnMmWZl1I7ezOF2gGuvUs8jB4g3aBz1k+clzqzVY2xgNcVkjnlCK -epFqUGeAHI+kN72MohXlHn3pwiJCdjsYBXZcD1DI0JMVxNW0n6VsThzu8pN/Hl2n -qfXRrYcCgYEAgzW0DgQHU3BYuBVkgn/uZoyanJGaFQQDh6UAX/ATIZ0p3kG5ASr7 -z1oaVVRKv7XdJq+BjRGN1DdmPupRZI1GPKtpD++zhJ5mIJOMNWSMP5+jn7Y+DJAx -IMEgGrYuGiQbMq8hZt6WyC5g3/UCK+j+ag8vXp5snnJOYo/pKuklDxM= ------END RSA PRIVATE KEY----- diff --git a/tests/integrations/client/cert-expired/client.csr b/tests/integrations/client/cert-expired/client.csr deleted file mode 100644 index eddbcd27513..00000000000 --- a/tests/integrations/client/cert-expired/client.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICdDCCAVwCAQAwETEPMA0GA1UEAxMGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEAo92Lb5C+BcSxfjEnYAlaJycaA8+acuElRV3VKgWzLzWp -vjfU6cpTwhNYEYDqx9DbLwsdmz3IE+V6MncI/jHtqmKYgJ0uTO80dcCWj8Ej5NZB -mtw6fJkcO535caO/dzywWY/O6claTNF3mBTVTirVc7lUccmqQUfrHEZYeQNvrAlS -ZN5nToHaJgU52UpbO1rl43A9fgTD9OSAWPjCgOrJgnXiDBOUV2lNO00xTTyKhAw3 -11C+ALI6juHDcpEdOgawgToyDSe73QWNz4LWJ0AQc3tvwdAReTzH8iEsjxfSeg/O -vWBbyKt/UNWFhsRd3lnKci+xutgpP2dvTEZgLCdFsQIDAQABoB4wHAYJKoZIhvcN -AQkOMQ8wDTALBgNVHREEBDACggAwDQYJKoZIhvcNAQELBQADggEBAH0WbgQ3vny+ -Axtki5uoz5mGezM8CZT13nR6G66p00Lo9HM0H3hPyScnbyX0D2OymvzmSr/CcsYQ -P/r9nJ9vCtUDbkxSypSacgzwfRwpTFuF9prj39BdyDUrDD7Y3oGhr+6SBI7zk6dB -L4zSX0BcTiXDvKtgMgPGAj0H+JcnHNvjJuMCQE3UEVCfKu8jqNtOXY5gbTdNZJKC -64vG7+nbQBjctNOEPyEtWR2r7jYKBjvBjR80uImXQ/mB/Ka5FfQ0l403gDg6837P -3wWyLd5c8xxPBCFmWv/rwwb5/myT5V4+ozAmFnyyFl/LjAwlcJtypN8wsFTrnpyB -xNkKWnuu0Z0= ------END CERTIFICATE REQUEST----- diff --git a/tests/integrations/client/cert-expired/client.pem b/tests/integrations/client/cert-expired/client.pem deleted file mode 100644 index 707c7afc45e..00000000000 --- a/tests/integrations/client/cert-expired/client.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDejCCAmKgAwIBAgIUc2EiY1NOtJN/411QByfHK/I6eMcwDQYJKoZIhvcNAQEL -BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl -aWppbmcxEDAOBgNVBAoTB1BpbmdjYXAxEjAQBgNVBAMTCU15IG93biBDQTAeFw0y -MDAyMjcwMzUxMDBaFw0yMDAyMjcwMzU4MDBaMBExDzANBgNVBAMTBmNsaWVudDCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKPdi2+QvgXEsX4xJ2AJWicn -GgPPmnLhJUVd1SoFsy81qb431OnKU8ITWBGA6sfQ2y8LHZs9yBPlejJ3CP4x7api 
-mICdLkzvNHXAlo/BI+TWQZrcOnyZHDud+XGjv3c8sFmPzunJWkzRd5gU1U4q1XO5 -VHHJqkFH6xxGWHkDb6wJUmTeZ06B2iYFOdlKWzta5eNwPX4Ew/TkgFj4woDqyYJ1 -4gwTlFdpTTtNMU08ioQMN9dQvgCyOo7hw3KRHToGsIE6Mg0nu90Fjc+C1idAEHN7 -b8HQEXk8x/IhLI8X0noPzr1gW8irf1DVhYbEXd5ZynIvsbrYKT9nb0xGYCwnRbEC -AwEAAaOBgzCBgDAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwIw -DAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUWJpKaemXoYL7mQFflkeejKsYHaUwHwYD -VR0jBBgwFoAUpLVBe8a11f/9odSni+iNVV3o8/IwCwYDVR0RBAQwAoIAMA0GCSqG -SIb3DQEBCwUAA4IBAQAa6qNPFuIcDEXFXVqkiqehDwWn5vhSvTRCBP4fHsuUC9GZ -8L/ymED0DlwuWlxREeT4FV4OYda4QLLzcsyDGJYkYCwb6QNAgnrm54THxCWcP5nq -KgGIQtJKA0ML9doDMjLU9BbPYj+SFI6A7lzHDoT9Qq9o7n+Ef8idSJOWjr4OY9G7 -udlnEL/HvB/E4yfBcjxCzIV8EGU+aQHtisQpxAcDxqSpCzGjedZ7zZSg/4Y3KzzD -deVVRO7GHt1fGEQUfop5L5IIPHgqIjO/wKdXe+A0UaoLzz3JJL5xymSBiRGt2dIJ -WZRbIDYCumtIzwoILIvAmAJZVHimvWBV39yiVT4R ------END CERTIFICATE----- diff --git a/tests/integrations/client/cert-expired/gencerts.sh b/tests/integrations/client/cert-expired/gencerts.sh deleted file mode 100755 index 96dadfa3686..00000000000 --- a/tests/integrations/client/cert-expired/gencerts.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'cert-expired'" - exit 255 -fi - -if ! which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert -initca ca-csr.json | cfssljson -bare ca - - -# pd-server -echo '{"CN":"pd-server","hosts":[""],"key":{"algo":"rsa","size":2048}}' | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server -hostname="localhost,127.0.0.1" - | cfssljson -bare pd-server - -# client -echo '{"CN":"client","hosts":[""],"key":{"algo":"rsa","size":2048}}' | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client -hostname="" - | cfssljson -bare client diff --git a/tests/integrations/client/cert-expired/pd-server-key.pem b/tests/integrations/client/cert-expired/pd-server-key.pem deleted file mode 100644 index 2914cb03b3f..00000000000 --- a/tests/integrations/client/cert-expired/pd-server-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAtmzUQNZ9BqLQJWpx9V0/kRB789TQ+sO3boWpLCn2eZO6e8+f -sa22jrYp4SHG7kpgUFnQ9k+HgWO6lOesjKrB0BYqzN9y3jqvwy/A7jw4UVirSX2t -eFmRmsUjActC2Efym0lqix74ybAzr5J3K5EpbWEmJXm7wilkfT6tLud+FWqQA+k9 -M9OCAQYSVzGMrWCqcUEWJvD3l/y14ksr3ZK9wHi3r79HFJFRaO+uiUc289t3fAPJ -VJYLTnGX1HRGmbDn7Og1vQth1BYvbjKFGNeOaSlJZsnt6Wu7clF2ANYYzq+X5HuS -BRmUlHvh9LXfNnOPZfRKRRHLLPTH+ff3GjHWdQIDAQABAoIBAQCH9ZxLppAP/hA7 -kpXUVOcnaq23EImgG3X+vUdUTwIPonZ+CEPw3JMO0d1smQv2VSBrQkVnDebkHDvW -9sO3IxzHxX2oq3ClCAAu7MxRwjgHCJrAbxZ2d+r4+qwYjjEHmLfL3G+3uCGbz+L1 -m6eNpObHSnxvucah8s+eOLRCHO3vbvURAVLME8f3BG8sNH9je7EaPxS9CicYvGYu -0GTqO5PzWkZVFh6Z640prz0gtyDytaqgMXj9zpCaZa6d6gXQu/CPe6OpimBwVPFA -Lu2liQSIxcWNIlGedxm+45ancSfEMeiSRcthgMM1knuduIT+ZFje8SIY5sCQD+kG -jlvUXBuBAoGBAOtm/hhoq4FLa/PIFoUZ2t3XsUVHG9TFZ6OYCCGPV4qAVbLDcdRP -fMgSIwO5mq1etwCJaMawieUYVvj7dS7dREq8R68c9EnFdFJJ8tBu6y06ogTiGiIG -gMmM7tGEXWvFRaMsdllFLQT7OpcjxJl1NksYp0E9peNaZuaaHTlcIOm9AoGBAMZj -JfNktY5lKIrGhFu/6OV0LkXAfwMmlyxv7ARmlvOGACAIpPpDt3qdiEQAJJ1SWyzu -uf1so9uyn1Je2k0TpjKA67sSptFO3GOVFHeHO9iQ/M33+Ogp5oUvAaF47eopq3d0 -Ko0YKeu/TA/XZh2SOrvjay1URg5807qjwyoS478ZAoGAdeLaFPc2DEXtBeSKApX0 -GWzPBdaahW9me3LDf6r9OAsU37Uo6B32a2tJxa/JWlE4bDhkFDyMkgIibAXhpVqB -vLuVWoixfdA4dI074E3r0HawdKmWVLvU+xps7tfOwQ3F1fWKPyJ5bSkzKkVrSz57 -thfeirmIvdaj+Y/sWrFcejECgYEAggd8WWJZ6YH+J69kE2resH4M0iUQWrVRgou9 -K0k+iVD8BgMEdxApU1Grfb6GQSM4pWO3PhaV86rI6ElJVhmZ5iI/37ai5i+FHjQZ 
-XRqjLBgjyrBMUYcdE5Ayxm4nqkIzo7DdLut2lpEkvoFU6e7tVjcCCYzh+h7w+7TY -d1w4MAkCgYEAuUt5lj0pGfhOb716PG6+EEmGDVh7Q7X15idptY+SytnW1cokcxEj -t6ZhFHZbj4v1Yxs+ra42zacN3GshXWU+29ZKmGxRMYvg+qKIfTb2XM1do7ZtXrbe -HgAq/9qXGLrfsDspFzjj0DvZ66j+qP4Ptfk5ryKATrhebMNyD4r6Gj0= ------END RSA PRIVATE KEY----- diff --git a/tests/integrations/client/cert-expired/pd-server.csr b/tests/integrations/client/cert-expired/pd-server.csr deleted file mode 100644 index 4e1d3b4d136..00000000000 --- a/tests/integrations/client/cert-expired/pd-server.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICdzCCAV8CAQAwFDESMBAGA1UEAxMJcGQtc2VydmVyMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAtmzUQNZ9BqLQJWpx9V0/kRB789TQ+sO3boWpLCn2 -eZO6e8+fsa22jrYp4SHG7kpgUFnQ9k+HgWO6lOesjKrB0BYqzN9y3jqvwy/A7jw4 -UVirSX2teFmRmsUjActC2Efym0lqix74ybAzr5J3K5EpbWEmJXm7wilkfT6tLud+ -FWqQA+k9M9OCAQYSVzGMrWCqcUEWJvD3l/y14ksr3ZK9wHi3r79HFJFRaO+uiUc2 -89t3fAPJVJYLTnGX1HRGmbDn7Og1vQth1BYvbjKFGNeOaSlJZsnt6Wu7clF2ANYY -zq+X5HuSBRmUlHvh9LXfNnOPZfRKRRHLLPTH+ff3GjHWdQIDAQABoB4wHAYJKoZI -hvcNAQkOMQ8wDTALBgNVHREEBDACggAwDQYJKoZIhvcNAQELBQADggEBAElCcTJa -65SJyeyzRFUsf6QTUUGGt5OCKQwLfUwWf1SY8h9qHWz8opBQ53hWR004kUo6TfRi -mV3wguzDYNfscBkDEITOvKMYmWpruoJtzGzVmzSE16EFI2XOrV1YRq+0s3u4gDJK -HvPdL5barh7W+NkZItr6dDGKc7tMwPrhBs7Lo4v60NM16sJQRgNIOZzaAk+e+Exx -vG6n9E8mjXW4sl1xovkcQCG6JaPLCdve3/4MD2pbZOlL8V+KtII/7T5UlgIlDkpt -fsODEeARcRUAkivjFGNIdY1VOJ9uq990vameztNecCNTMiRNNW7ObSC/2Gwbq1+f -uBUt2QfzN0+AT6U= ------END CERTIFICATE REQUEST----- diff --git a/tests/integrations/client/cert-expired/pd-server.pem b/tests/integrations/client/cert-expired/pd-server.pem deleted file mode 100644 index a5a3534b3a9..00000000000 --- a/tests/integrations/client/cert-expired/pd-server.pem +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDljCCAn6gAwIBAgIUImWBDH7tmmm3XLbnnM6ursx5B4cwDQYJKoZIhvcNAQEL -BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl -aWppbmcxEDAOBgNVBAoTB1BpbmdjYXAxEjAQBgNVBAMTCU15IG93biBDQTAeFw0y -MDAyMjcwMzUxMDBaFw0yMDAyMjcwMzU4MDBaMBQxEjAQBgNVBAMTCXBkLXNlcnZl -cjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALZs1EDWfQai0CVqcfVd -P5EQe/PU0PrDt26FqSwp9nmTunvPn7Gtto62KeEhxu5KYFBZ0PZPh4FjupTnrIyq -wdAWKszfct46r8MvwO48OFFYq0l9rXhZkZrFIwHLQthH8ptJaose+MmwM6+SdyuR -KW1hJiV5u8IpZH0+rS7nfhVqkAPpPTPTggEGElcxjK1gqnFBFibw95f8teJLK92S -vcB4t6+/RxSRUWjvrolHNvPbd3wDyVSWC05xl9R0Rpmw5+zoNb0LYdQWL24yhRjX -jmkpSWbJ7elru3JRdgDWGM6vl+R7kgUZlJR74fS13zZzj2X0SkURyyz0x/n39xox -1nUCAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUH -AwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFMuZjO81UxYHG3oX -8/yYst7QNWrlMB8GA1UdIwQYMBaAFKS1QXvGtdX//aHUp4vojVVd6PPyMBoGA1Ud -EQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAep91Vx/n -PLfE/HLxUpG0/CQFZ96BbR2KNmNQZ3YrDaEdiou186sT9sJ8cSW2EuumLBNggWPh -kx7zE9AjJsNhe2tYnPefpkYTr0nJKyqyR5I9qg2yWm1bzCJ0dIx8yecfCOLFZQaM -4ihcF8O8OYijlEZOwJ5QhTnNoJ23hYsg2JgkhhaQMXTDYRYQ06QGz08dBwczAOyF -AXvUDzIArRRM7ELPBGlaerBwsJNyYV6m4tpZ+SkvXspGsRQ5glf2N1zGMI+wUuyX -YTJT7ajpOhq5KO2klWw5w3nmAsCh2xZsu26z2vTCKRfs4tic+Y9N4LNONkx5wQuR -V7KQe54/wOhlGQ== ------END CERTIFICATE----- diff --git a/tests/integrations/client/cert/ca-config.json b/tests/integrations/client/cert/ca-config.json deleted file mode 100644 index d003a0bbaba..00000000000 --- a/tests/integrations/client/cert/ca-config.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "signing": { - "default": { - "expiry": "87600h" - }, - "profiles": { - "server": { - "expiry": "87600h", - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ] - }, - 
"client": { - "expiry": "87600h", - "usages": [ - "signing", - "key encipherment", - "client auth" - ] - } - } - } -} - diff --git a/tests/integrations/client/cert/ca-csr.json b/tests/integrations/client/cert/ca-csr.json deleted file mode 100644 index 8be8f743fe2..00000000000 --- a/tests/integrations/client/cert/ca-csr.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "CN": "My own CA", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "C": "CN", - "L": "Beijing", - "O": "Pingcap", - "ST": "Beijing" - } - ], - "ca":{ - "expiry": "87600h" - } -} diff --git a/tests/integrations/client/cert/ca-key.pem b/tests/integrations/client/cert/ca-key.pem deleted file mode 100644 index 377a02bd729..00000000000 --- a/tests/integrations/client/cert/ca-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAyW4ndkuzRmBAy+Ur7hV6kTjcP1Fog5z+W0d6RxT9AG47S9HV -QLYuiGwib/GhkOgH6SvEOJ36Rkc1vhoskHbqdWNlaPJYWXDZrc+hHgK1AKeFSkZk -XivAW+x2yD1vj6m6m8nPerR9kG653eIpCrv6izdWFQix2yUzTxvQUR/MXyKoeD2V -5A63pLg/F2Nk/N8VW790y8Ld/4zv/z4pO+JoU015hnXuQ5Iiqf08qtzElSEwPDT6 -r2vgDc+82t3RxJ3UYznvdhyY+/4SI2J3R0n5s8NHqLum4FXIARyEB/T0mPY+0BL8 -hLKkA3F5z2xW635iO/eHK1e2CcMaeqyED4l3SwIDAQABAoIBAAKZp2zBgWjxKST4 -ql6CbHifcUhn9p9sUWRAQfXU8Ycl5SIPbV9Oer9MFg234swKEsARzpCkiWyK0sjH -cbfTsScex1pZdoaBDG5P9dZ7VnbRssjIq9cDXZJSNR5UnLIQENad/k2pMs3AgCm7 -F6iJ0KwnZLcUy6b2jfkBdOj3Lh40QG2XJLf0YV6f9d+Ib7Q6U9cC1ryjsNQDT6Y3 -b0h8SYQQyVwgStmJ8IfAB7LQuZUIHrryld1yawHERVS4AQbxhXYpqVBrzwLSx3KU -/AIhEWnfuKo90hTpJWQgo+JdvkgFTW1THbqRRJxKpLQ6XhIezosVxCh+KY6LhZYL -IoIdTJECgYEA/uTj5AcZlnxGjQDm1MMJ/ICQf8Sq41Rkkm0mgmWHnL6csQuJrASn -l/7Pj+WFvo3G17ejECGFGGR74sUnaODeAsP3IZU/dlOSPT/lJHMoq+UNSneu8vVA -6MEYHJoPDQIZX8VChi9Xm8lRJKokJn7E3S3RwFLT2FVrHWJJXQsx/q8CgYEAyk3h -xYCia+YTIjQMOhYds2HwF66a3Rp49Z55PY53PUB54jpFiIAJ/cAw3iQ6dXWW8Bmo -EBEk+nFDLeiGEg8oZWMBmc5eNwsh6kOV9UpsfLplVY/rUaeIX8XB1A+ZdBE9Q+vI -4Yy7ggF2kze4D5ijiB0vFXGIsU+Nz9Yzx59U2CUCgYEAtNF05L8wpLNWbRKVrZsK -i4g8eZbvT0L/8Wvi5J/XuxbxGxs/N7s5lLabUc4VuZ7jzwWjdH8C5tHpnG4Vze2v -MLEJsFYq5erVFAQurlPPJ1neutP6VLosqLDKRQf441Z435qU3ZHaCVaf7G2SJYjK -aMjnQhK/23iE+xxNROb7OVsCgYEAvHQUjyEnLJAk3sXRZgLYJBRlzgnPJYzVhSjd -FCS0mxCG+eECiQeNRx5T53ukIWsq8ftUfbMa2VnNFOT4j7YjEV9LTHXbejcantna -xadQrFgOscfhlC0WcvELgHoPnvm4Mp9ggAvTWGX4iWdsRMsR+2gERlt+1H2hQyzt -C1Y3eSkCgYEAgR2FO4zZ9kYYRxqoE+4oqFEleoNWbeEmb4Gtqw8A2z3+AJQGoey7 -Q/RepkYl6/HvL+Q+SDSwK8jlw4IyCgBhJ0BOvFzgMuwz0qbI+mgFCAX+ShNfdNBi -kfKH1aRftid/O2na1b3kreiJiHIAMpO1aLoF/eE3DuHlv7VcOndiSPY= ------END RSA PRIVATE KEY----- diff --git a/tests/integrations/client/cert/ca.csr b/tests/integrations/client/cert/ca.csr deleted file mode 100644 index bc024dd984c..00000000000 --- a/tests/integrations/client/cert/ca.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICnDCCAYQCAQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAO -BgNVBAcTB0JlaWppbmcxEDAOBgNVBAoTB1BpbmdjYXAxEjAQBgNVBAMTCU15IG93 -biBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMluJ3ZLs0ZgQMvl -K+4VepE43D9RaIOc/ltHekcU/QBuO0vR1UC2LohsIm/xoZDoB+krxDid+kZHNb4a -LJB26nVjZWjyWFlw2a3PoR4CtQCnhUpGZF4rwFvsdsg9b4+pupvJz3q0fZBuud3i -KQq7+os3VhUIsdslM08b0FEfzF8iqHg9leQOt6S4PxdjZPzfFVu/dMvC3f+M7/8+ -KTviaFNNeYZ17kOSIqn9PKrcxJUhMDw0+q9r4A3PvNrd0cSd1GM573YcmPv+EiNi -d0dJ+bPDR6i7puBVyAEchAf09Jj2PtAS/ISypANxec9sVut+Yjv3hytXtgnDGnqs -hA+Jd0sCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQCBDCdUp4A66y68NhcouGdm -xYPXMiG7egVjKoW0cm47g7ezbrJsoHRZqfKaBlN0B1Z2AR6JW2Ucl7mjCeMrogwo -7I/fRbpHeFSIPXRL5puzF3Ph6t6JcLDuWpUli2wvJpGNJqaVMGiOisaWs4ewpBgU -LKUQauG8jyUzfpYp6t+MgmTVGjB3Ml92QwphXuOJfk/n73suHGfEC+eCz4gs8MVE 
-mR+5os4Dwj3Gnk2915iqdqVYc2YXBon9PW8DjmqPteRtL/va849mqwvsH9z3hrGS -G6zWPpnvEYcTBNfoEbtvfpnIs8pdWpRS9aGAgRcQG2iZWCKe0xKg2GKEACNeRuux ------END CERTIFICATE REQUEST----- diff --git a/tests/integrations/client/cert/ca.pem b/tests/integrations/client/cert/ca.pem deleted file mode 100644 index da30c587eeb..00000000000 --- a/tests/integrations/client/cert/ca.pem +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIUak+RvdkfgpVX031HzHU6pgdAaDgwDQYJKoZIhvcNAQEL -BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl -aWppbmcxEDAOBgNVBAoTB1BpbmdjYXAxEjAQBgNVBAMTCU15IG93biBDQTAeFw0y -MDAyMjcwMzU1MDBaFw0zMDAyMjQwMzU1MDBaMFcxCzAJBgNVBAYTAkNOMRAwDgYD -VQQIEwdCZWlqaW5nMRAwDgYDVQQHEwdCZWlqaW5nMRAwDgYDVQQKEwdQaW5nY2Fw -MRIwEAYDVQQDEwlNeSBvd24gQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDJbid2S7NGYEDL5SvuFXqRONw/UWiDnP5bR3pHFP0AbjtL0dVAti6IbCJv -8aGQ6AfpK8Q4nfpGRzW+GiyQdup1Y2Vo8lhZcNmtz6EeArUAp4VKRmReK8Bb7HbI -PW+Pqbqbyc96tH2Qbrnd4ikKu/qLN1YVCLHbJTNPG9BRH8xfIqh4PZXkDrekuD8X -Y2T83xVbv3TLwt3/jO//Pik74mhTTXmGde5DkiKp/Tyq3MSVITA8NPqva+ANz7za -3dHEndRjOe92HJj7/hIjYndHSfmzw0eou6bgVcgBHIQH9PSY9j7QEvyEsqQDcXnP -bFbrfmI794crV7YJwxp6rIQPiXdLAgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRLJQxNGu3t0+JhN9NxsqGLb4Ne -BjAfBgNVHSMEGDAWgBRLJQxNGu3t0+JhN9NxsqGLb4NeBjANBgkqhkiG9w0BAQsF -AAOCAQEAlPSpB/o9F4MMxXAwfGLGaWhEsHAjqNWw7rxUC6Pt9aNlYj+5YlDNN1gV -IlCg7PvcdNfiRpP031QwP5EKFCAwl1O49U97N79ClCL8GRt4Kavw4ejxrKa8uI/S -IWdfIkuYQbgJWtnneNuAauNeeq4XGkqkmVYWlGs50TysFRY4HxjqWn7r6FFDKaLE -txpGpS7BX8sLBrvug9+UCeuMYBjeGSVZu5np9Fxkdy9JMKeEL1nbnXhAKOrgAWPg -pOvn3g+7ucaoLrFPtChtvHk0RNYTF+6P9PFFY5JigfQoN25hT6PciQUTOX4hasc6 -gwzBGGsFMsPdnvC03dWA2uc+a2T34w== ------END CERTIFICATE----- diff --git a/tests/integrations/client/cert/client-key.pem b/tests/integrations/client/cert/client-key.pem deleted file mode 100644 index a20cab9d2db..00000000000 --- a/tests/integrations/client/cert/client-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAzw6oKaWJmsTQ3e380k2CSj7+lMndBJoDfgkBsSWyfurfRcgf -ErmDgvp67p7ibgSHzI0kJLX+d303783etDxRZeI6WffCEdKTRhg4rNzxi9uMt4QW -2U8sQGJN0M8RRogyrGvxJQ+RQvjzuoqMTmvLB3LriXkhv0kmq2F+NZ6GnQTtPDs2 -oc2McQ2/Op3Edb84HR6f6QGc96c/CXSnQkpM3etfQLKrlaTPbklzXIZemS0Ve0as -UF6rbEaCkbnM0vuHl5s9irEAuXfz6jZuO+ss5PyXBm+rhO+mzWh1rj5fK33dwEd1 -tE4dOF+N3pgdDfoKpWtHywIrdhlejJ70XRzqTwIDAQABAoIBAQCiswiumPX0mFzr -VntIIUGU58UR00EJwZ+m5JrgwRduJU7GPYc1JnLRc9MvN8gC3Sp8MBfLhPpsmAdh -Nqqdg5wOa+KgGU/0wzuYY6X9JyviUuVSusddgJnsCZLN1jfj7VesJrF5Bq+FKawS -05WGdas2sjWkc3tyHJ/3IQeUSHnMXMxXSBeyyg6U+uPIMW32BuKj2rKNPV4hIrP6 -bllpZJF3WkSLPUOlkMnS4vUomz3u8tsLqAafmXWdZqsZMW7/ioW1RtPOswg8DtWx -i/px88A1/BOIoWNe+njg7yUS5YeTY/wzDCekTa7XiuAgJA3ZsGivn+pg8KCJCMQC -VcNGwu6BAoGBAPxjnJvxKh28nQ714OmlNrRw/bTh2d8KcDuJx0VkfAWNy/4wZwxI -qdtjQJ9YX7x5R71TuQNV39INvhpuu8KYIMvLDFm2V5gLSd8ZxbuhzccUJpKTvN8G -CjByvYCnA1KoTCbiqyBkYTXsgVoM7dn3dxJ3csYVndWtcffUod34SsDBAoGBANIF -A81lfZSFMmarhoEWeSuGZpeFzQ5cza3a2Ma/XhCwst04jYaKSZhcjsQZyI5q/wze -YKKF135q5QwBvhhNqu39Kd2VRV2Rw7WngRuGEznVDNTx740oAZMwm07RM3hxGfG6 -owQbIUn1OhsgjTSl4jXJewtgLmK6+1G5mG7sNF8PAoGANDqA3Bxp9MFlVwU2x+Ly -kSCYv+fE6E0GsKtwW0HSEGwpfK6ThI086TN+2fq1xRDr8Zfzv2bz7En/vwSPQlOs -5b9dDOuY8NPVM5/ntU5kgQAAg3CjMxvS2/fCk278VwyQxbM+anObUkdg9TubtPFq -6J1jWO58PQ2pefm8jWymO4ECgYAnRojPgItbmw0x5iHhQjKm0Rueeoc+iFxuht7D -TEZrGKBafpj48COTOrv4MFoxSBEqduvbeOwz2Am9lRXXta6hkxahOakfNoNDFXAv -lYNC7XTY3eXOoAyrWguxUa4ud/hCHIUf33L6QcH8ELpXfi4voN5B4lrKW+1j+zFm -jQW+QQKBgQDAmeLRTX5BtFCzfh+SfFIwgYZwtOyH7SBznioW1W9Gs+k80z2PEH2R 
-XX2F8cYTErRdK3URLPrl/YvFq7t94/1Sjy7Fx7s5Wm5LQkUNo9Z1KFGFMa4/1Ksn -bySqhC0gt+5a/Nnp8rk4O6mhqzegbaUPwLAWvXAvNedrEToalYvVpQ== ------END RSA PRIVATE KEY----- diff --git a/tests/integrations/client/cert/client.csr b/tests/integrations/client/cert/client.csr deleted file mode 100644 index d6e9f3b9c77..00000000000 --- a/tests/integrations/client/cert/client.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICdDCCAVwCAQAwETEPMA0GA1UEAxMGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEAzw6oKaWJmsTQ3e380k2CSj7+lMndBJoDfgkBsSWyfurf -RcgfErmDgvp67p7ibgSHzI0kJLX+d303783etDxRZeI6WffCEdKTRhg4rNzxi9uM -t4QW2U8sQGJN0M8RRogyrGvxJQ+RQvjzuoqMTmvLB3LriXkhv0kmq2F+NZ6GnQTt -PDs2oc2McQ2/Op3Edb84HR6f6QGc96c/CXSnQkpM3etfQLKrlaTPbklzXIZemS0V -e0asUF6rbEaCkbnM0vuHl5s9irEAuXfz6jZuO+ss5PyXBm+rhO+mzWh1rj5fK33d -wEd1tE4dOF+N3pgdDfoKpWtHywIrdhlejJ70XRzqTwIDAQABoB4wHAYJKoZIhvcN -AQkOMQ8wDTALBgNVHREEBDACggAwDQYJKoZIhvcNAQELBQADggEBACtxEqbPkX4O -pPldiNIBZcbjuYuCiA4Clbit/XolDOQVT0oRic0kBRQwz8sZZdIsHtt6C+aYuxPU -aR88m1wnpdTySE7yl+ekofrhTaRUPjOOpV5FcFMn29m3YUeJNFBWpRgxU/RoeF6N -OzegA1pGqfcJZl2dPyH7rPniEpIDcubJ/O2MSc/XDhxkTkotRSBZezKUTkFD0UE0 -YJ2wZqGiWVkOm0FmeQCpY13NZJ1qX5rjcJECToZiDitLaxIEYo2ldFeuDBZ4bYIH -Rp1dwkMHl4n845pVJvvpYeNAT+dSbxLUj0Y5XLaVT7jl65J/PYMj1OYgT5k9xoGQ -dMw900dzQdM= ------END CERTIFICATE REQUEST----- diff --git a/tests/integrations/client/cert/client.pem b/tests/integrations/client/cert/client.pem deleted file mode 100644 index 76c17ad3e17..00000000000 --- a/tests/integrations/client/cert/client.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDejCCAmKgAwIBAgIULdC74rV6hrVVBS/nosUiL2gf1BAwDQYJKoZIhvcNAQEL -BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl -aWppbmcxEDAOBgNVBAoTB1BpbmdjYXAxEjAQBgNVBAMTCU15IG93biBDQTAeFw0y -MDAyMjcwMzU1MDBaFw0zMDAyMjQwMzU1MDBaMBExDzANBgNVBAMTBmNsaWVudDCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM8OqCmliZrE0N3t/NJNgko+ -/pTJ3QSaA34JAbElsn7q30XIHxK5g4L6eu6e4m4Eh8yNJCS1/nd9N+/N3rQ8UWXi -Oln3whHSk0YYOKzc8YvbjLeEFtlPLEBiTdDPEUaIMqxr8SUPkUL487qKjE5rywdy -64l5Ib9JJqthfjWehp0E7Tw7NqHNjHENvzqdxHW/OB0en+kBnPenPwl0p0JKTN3r -X0Cyq5Wkz25Jc1yGXpktFXtGrFBeq2xGgpG5zNL7h5ebPYqxALl38+o2bjvrLOT8 -lwZvq4Tvps1oda4+Xyt93cBHdbROHThfjd6YHQ36CqVrR8sCK3YZXoye9F0c6k8C -AwEAAaOBgzCBgDAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwIw -DAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUGsVDnIhNdUWkgi/J0EG9fErnUYIwHwYD -VR0jBBgwFoAUSyUMTRrt7dPiYTfTcbKhi2+DXgYwCwYDVR0RBAQwAoIAMA0GCSqG -SIb3DQEBCwUAA4IBAQCv2VUgWOziEqhYmyYYNjhlynjE/tfheuewg+6/y9sHrspA -fLcAW3R7waH1a7BUcX7TsIk9Pn0MJktX2qO8Y+uuXuvv5s9jl5SDLw8pRjTIGIla -dr5RLsJWdVYsNRdm3wjACVIwJkRpW4+uaXLj3/7SML0S0pLeHkRUPblhoMfK8ZOR -2CIkjbYjMgjz7SLqOe7KT3T9yLIDbcDceqA5oVEP+hEbA5MjEhvIGopryYfZRI6L -4LQ+kHuqlOwBRw6HYvhlVhMo2UOmyCkHBhC71Il5a8RcIXZSV70jB1W/cihemNAA -6Knqn8HdnjF88Sg+nBCpQm9NmOjHJrJ8nW9mX6wL ------END CERTIFICATE----- diff --git a/tests/integrations/client/cert/gencerts.sh b/tests/integrations/client/cert/gencerts.sh deleted file mode 100755 index 606e05c60d9..00000000000 --- a/tests/integrations/client/cert/gencerts.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'cert'" - exit 255 -fi - -if ! 
which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert -initca ca-csr.json | cfssljson -bare ca - - -# pd-server -echo '{"CN":"pd-server","hosts":[""],"key":{"algo":"rsa","size":2048}}' | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server -hostname="localhost,127.0.0.1" - | cfssljson -bare pd-server - -# client -echo '{"CN":"client","hosts":[""],"key":{"algo":"rsa","size":2048}}' | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client -hostname="" - | cfssljson -bare client diff --git a/tests/integrations/client/cert/pd-server-key.pem b/tests/integrations/client/cert/pd-server-key.pem deleted file mode 100644 index 47f075915ae..00000000000 --- a/tests/integrations/client/cert/pd-server-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAvZbiYx6ENOXWDyLkplST8FrGTKC1EL/SG/bLtBIKj4M2XlJH -KUbSczs0tKzUK/C7i8xP4+aJvM4jP/HM1hc1uOrJIh4XGm7PaXdMqrGlmhBdFN1T -xF0+Df0p/Mcc8uilOw4Qprsy4qdugzTryZSPwlxmgK/TY20F5w0i+AaRkK65zS5m -5Tu6zuA83oa8l1J4SMiAlRq5D/VVVjD39WaN2V3zNhiytdewVTiGWPg5VA44Y/13 -tAcB3szQlJkjxGWBjoPJwUSixNVkOLJNwwGQuBPQSXOGHQN9fNCAmBfCgccyqxR6 -4qildhjn6mCMmb2PoFInHtJzCPwSkvULZbwAVwIDAQABAoIBAQCTde4zpiKyeina -sAskDBjdE9208DKlCm3D7ltWfcnVAfuhtWFAC6PYmK5P8TU90AovnARvgHqVbcgh -DlOCGZ9hasxySvExUArnPzFUesQRZrTfa56h1txTSRwFTmGH1cfefPwakvPrMstU -Ji9XJMQEKpZXdWp3pO64XZFbVNCXtNc5agJd2ZQu5N+NV2eGqI1BpdEVD1CRr1d/ -6nLU3VUDt33dDBhb/Tou9HYXIoTum4pJ0NGKaw7n9h9pccKhQe7XEwzXGRxy+91O -ZZbmvJMCrwB3zGmZvQfz93ekBOXLmYNGKxUhHWgL0z0ujJQpCkWQ8zhoIMLi+3V3 -zejgq8yBAoGBAPAV4NOV9ir6EfL1bmvtnSm4V1AlA1fmOBeZnW/w9uw5ZyYXSLRx -sy4CKkkfu7nEEIC5Lza2jLT9eKhQhT84fpnmhpNqVwkIHqZU/vW7aFbC3R1F8ucx -0mFJuggSu+gl5SltzNxNyCcQXkUedzvugp+u42XYvTDLDBkF5YEJxUqhAoGBAMoo -HWTTPpnX1XpscKFt2ogY4Qd7kewcguM9qpbgWI9jnXqkw5JE4L/GV13hNvk74J/i -rUBcdsnGwZ0S3Vdy1qmoUjU2vw6u0ilNYfAjtInIr+Nxhg7KsfsEorjJhFe1sV3s -vhNqkUc83Ls8R1EFt6zCrBvoNn2I8l+GWq3Ap5/3AoGAVDD35HrkFnIXNUIH9OUg -he7U/4/bOknLRctiwEyZ6oC+wUbNAioVEX9E4Nu18xDkUJ+gBOnDMfzpAO0rrN3c -149VhgB2gP5N8TpBJl8P2cz/yTIsWhLImcFG8WnQZzpSwFJ20pKcK8a7qDVkzql3 -ORQVPjPhNGkf1u6Qas3IV8ECgYB5fNwVUKIpYD0p4mtZiCQxcAokLg3GwGekWtXe -8Booqevk3YmuqHgMqAyvHX6lD2fxjg8iQ68bmbeXRtQADsKUmNQ2qJzvHP6t1ZRK -9Zo0x4d0fy/aSdPmD6YeuPQwpb9eu7yK/JssHZZla1dMiMwWJbO63bWJgbhu0dZC -3ymdhwKBgAMzccLBobvGP10npwY/xyc93XuZs9leIVM8WDBemePyNsXmzLLaXwTn -a+P/jTuwjifZ+9MBXIzgEHx2GlD+pOJjp9dS2y3rsn4b5f4OhoG8hl1Bfh5E5LhU -kAuFXcXWn4WivLE5bVWhBEUXlTgXk7gtQoaLcwm1g6g/OVHABAlu ------END RSA PRIVATE KEY----- diff --git a/tests/integrations/client/cert/pd-server.csr b/tests/integrations/client/cert/pd-server.csr deleted file mode 100644 index 917ad17a1e1..00000000000 --- a/tests/integrations/client/cert/pd-server.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICdzCCAV8CAQAwFDESMBAGA1UEAxMJcGQtc2VydmVyMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAvZbiYx6ENOXWDyLkplST8FrGTKC1EL/SG/bLtBIK -j4M2XlJHKUbSczs0tKzUK/C7i8xP4+aJvM4jP/HM1hc1uOrJIh4XGm7PaXdMqrGl -mhBdFN1TxF0+Df0p/Mcc8uilOw4Qprsy4qdugzTryZSPwlxmgK/TY20F5w0i+AaR -kK65zS5m5Tu6zuA83oa8l1J4SMiAlRq5D/VVVjD39WaN2V3zNhiytdewVTiGWPg5 -VA44Y/13tAcB3szQlJkjxGWBjoPJwUSixNVkOLJNwwGQuBPQSXOGHQN9fNCAmBfC -gccyqxR64qildhjn6mCMmb2PoFInHtJzCPwSkvULZbwAVwIDAQABoB4wHAYJKoZI -hvcNAQkOMQ8wDTALBgNVHREEBDACggAwDQYJKoZIhvcNAQELBQADggEBACbATy3O -4uCBtJRkvijMTSLBOu+4H9huXfV/WJ7ccTVbtqT3/xq4agG45JSX5K0B9cUaBZIW -UwFpyvDjSwwH6rjZzES0Py6tt0NL8/iZpTyHv4nPMoVbaeUKTJhknQCgPorsAd7r -JRL2bIlnaBBqECI08Sq1sNUD2Va23vzcGG2W4V9oPJeAnhaT8yKt83ZCt4yRCf4I 
-wxKNhpTUhm62Vhc2Ijo/5ktwsye10vTX9YbuAQPY7K//jbGDtJH5+biO4/kNR7kg -06PYZJJtqEzgaf4VuOW/c9K4q7Y5QhS3tmEECpR3xG30x1yw3aCqXkFtE43O3Yr1 -CMTKQvTHiu8RpPE= ------END CERTIFICATE REQUEST----- diff --git a/tests/integrations/client/cert/pd-server.pem b/tests/integrations/client/cert/pd-server.pem deleted file mode 100644 index 2381a662d26..00000000000 --- a/tests/integrations/client/cert/pd-server.pem +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDljCCAn6gAwIBAgIUYSbOyApfQYwp07xKToCAXeeLqbowDQYJKoZIhvcNAQEL -BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl -aWppbmcxEDAOBgNVBAoTB1BpbmdjYXAxEjAQBgNVBAMTCU15IG93biBDQTAeFw0y -MDAyMjcwMzU1MDBaFw0zMDAyMjQwMzU1MDBaMBQxEjAQBgNVBAMTCXBkLXNlcnZl -cjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL2W4mMehDTl1g8i5KZU -k/BaxkygtRC/0hv2y7QSCo+DNl5SRylG0nM7NLSs1Cvwu4vMT+PmibzOIz/xzNYX -NbjqySIeFxpuz2l3TKqxpZoQXRTdU8RdPg39KfzHHPLopTsOEKa7MuKnboM068mU -j8JcZoCv02NtBecNIvgGkZCuuc0uZuU7us7gPN6GvJdSeEjIgJUauQ/1VVYw9/Vm -jdld8zYYsrXXsFU4hlj4OVQOOGP9d7QHAd7M0JSZI8RlgY6DycFEosTVZDiyTcMB -kLgT0Elzhh0DfXzQgJgXwoHHMqsUeuKopXYY5+pgjJm9j6BSJx7Scwj8EpL1C2W8 -AFcCAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUH -AwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFKVYxaz4G7wC+rks -vuCBoyM1OW0AMB8GA1UdIwQYMBaAFEslDE0a7e3T4mE303GyoYtvg14GMBoGA1Ud -EQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAlzKKrmPC -ApTZV0qPgHLUHPPHA3FGnuBeJNLiNn4h+F4aQfIOXpDLEJPG+KzMiMWdujP+vpjH -ZPPy68HlZCvbZXtNohggrzrSdZyggwYm292PPE4OllfSrg+1M1fxb57+5UXedRKm -SgzaSjDgeogjOoB3J7sHWfT2RTI6sYiIQrgfipL3060k64usNO1lVnL+wzTovwU+ -149bz/0MifjF8sBTNUGSr9V4csRsRt+R6NkoIbCCwjyIeuFmrUjWcKsjA+SU8Po6 -OyHpxjAVY5jsvPKGz7i4G7Zw+4JI03SeZr6r3WVNyHmUkLKNfr6J6sXfFKZstHAL -g7R41DuOTcwd3w== ------END CERTIFICATE----- diff --git a/tests/integrations/client/cert_opt.sh b/tests/integrations/client/cert_opt.sh new file mode 100755 index 00000000000..3984e67f3ab --- /dev/null +++ b/tests/integrations/client/cert_opt.sh @@ -0,0 +1,55 @@ +#!/bin/bash +cert_dir="$2" + +function generate_certs() { + if [[ ! -z "$cert_dir" ]]; then + cd "$cert_dir" || exit 255 # Change to the specified directory + fi + + if ! [[ "$0" =~ "cert_opt.sh" ]]; then + echo "must be run from 'cert'" + exit 255 + fi + + if ! which openssl; then + echo "openssl is not installed" + exit 255 + fi + + # Generate CA private key and self-signed certificate + openssl genpkey -algorithm RSA -out ca-key.pem + openssl req -new -x509 -key ca-key.pem -out ca.pem -days 1 -subj "/CN=ca" + # pd-server + openssl genpkey -algorithm RSA -out pd-server-key.pem + openssl req -new -key pd-server-key.pem -out pd-server.csr -subj "/CN=pd-server" + + # Add IP address as a SAN + echo "subjectAltName = IP:127.0.0.1" > extfile.cnf + openssl x509 -req -in pd-server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out pd-server.pem -days 1 -extfile extfile.cnf + + # Clean up the temporary extension file + rm extfile.cnf + + # client + openssl genpkey -algorithm RSA -out client-key.pem + openssl req -new -key client-key.pem -out client.csr -subj "/CN=client" + openssl x509 -req -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out client.pem -days 1 +} + +function cleanup_certs() { + if [[ ! 
-z "$cert_dir" ]]; then + cd "$cert_dir" || exit 255 # Change to the specified directory + fi + + rm -f ca.pem ca-key.pem ca.srl + rm -f pd-server.pem pd-server-key.pem pd-server.csr + rm -f client.pem client-key.pem client.csr +} + +if [[ "$1" == "generate" ]]; then + generate_certs +elif [[ "$1" == "cleanup" ]]; then + cleanup_certs +else + echo "Usage: $0 [generate|cleanup] " +fi diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index 8ada9f9d519..41e7e650261 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -804,7 +804,7 @@ func (suite *clientTestSuite) SetupSuite() { })) suite.grpcSvr.GetRaftCluster().GetBasicCluster().PutStore(newStore) } - cluster.GetStoreConfig().SetRegionBucketEnabled(true) + cluster.GetOpts().(*config.PersistOptions).SetRegionBucketEnabled(true) } func (suite *clientTestSuite) TearDownSuite() { @@ -893,7 +893,7 @@ func (suite *clientTestSuite) TestGetRegion() { } return r.Buckets != nil }) - suite.srv.GetRaftCluster().GetStoreConfig().SetRegionBucketEnabled(false) + suite.srv.GetRaftCluster().GetOpts().(*config.PersistOptions).SetRegionBucketEnabled(false) testutil.Eventually(re, func() bool { r, err := suite.client.GetRegion(context.Background(), []byte("a"), pd.WithBuckets()) @@ -903,7 +903,7 @@ func (suite *clientTestSuite) TestGetRegion() { } return r.Buckets == nil }) - suite.srv.GetRaftCluster().GetStoreConfig().SetRegionBucketEnabled(true) + suite.srv.GetRaftCluster().GetOpts().(*config.PersistOptions).SetRegionBucketEnabled(true) suite.NoError(failpoint.Enable("github.com/tikv/pd/server/grpcClientClosed", `return(true)`)) suite.NoError(failpoint.Enable("github.com/tikv/pd/server/useForwardRequest", `return(true)`)) diff --git a/tests/integrations/client/client_tls_test.go b/tests/integrations/client/client_tls_test.go index 1863e206584..b46895f4f8c 100644 --- a/tests/integrations/client/client_tls_test.go +++ b/tests/integrations/client/client_tls_test.go @@ -20,6 +20,7 @@ import ( "io" "net/http" "os" + "os/exec" "path/filepath" "strings" "testing" @@ -36,7 +37,10 @@ import ( ) var ( - testTLSInfo = transport.TLSInfo{ + certPath = "./cert" + certExpiredPath = "./cert-expired" + certScript = "./cert_opt.sh" + testTLSInfo = transport.TLSInfo{ KeyFile: "./cert/pd-server-key.pem", CertFile: "./cert/pd-server.pem", TrustedCAFile: "./cert/ca.pem", @@ -59,6 +63,26 @@ var ( // when all certs are atomically replaced by directory renaming. // And expects server to reject client requests, and vice versa. 
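The checked-in certificates under cert/ and cert-expired/ are replaced by the cert_opt.sh script above, which tests invoke with "generate" or "cleanup" plus a target directory. A minimal sketch of how such a script can be wrapped in a test helper; the helper name and package are assumptions for illustration, not part of this change.

package tlstest

import (
	"os"
	"os/exec"
	"testing"
)

// generateCerts creates dir, runs the cert script to populate it, and registers
// cleanup so certificates are produced per test run instead of being committed.
func generateCerts(t *testing.T, dir string) {
	t.Helper()
	if err := os.MkdirAll(dir, 0o755); err != nil {
		t.Fatal(err)
	}
	if err := exec.Command("./cert_opt.sh", "generate", dir).Run(); err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		// Best-effort cleanup, mirroring the deferred cleanup in the TLS reload test.
		_ = exec.Command("./cert_opt.sh", "cleanup", dir).Run()
		_ = os.RemoveAll(dir)
	})
}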
func TestTLSReloadAtomicReplace(t *testing.T) { + // generate certs + for _, path := range []string{certPath, certExpiredPath} { + if err := os.Mkdir(path, 0755); err != nil { + t.Fatal(err) + } + if err := exec.Command(certScript, "generate", path).Run(); err != nil { + t.Fatal(err) + } + } + defer func() { + for _, path := range []string{certPath, certExpiredPath} { + if err := exec.Command(certScript, "cleanup", path).Run(); err != nil { + t.Fatal(err) + } + if err := os.RemoveAll(path); err != nil { + t.Fatal(err) + } + } + }() + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/tests/integrations/client/gc_client_test.go b/tests/integrations/client/gc_client_test.go index acb1c458812..24ee8506efd 100644 --- a/tests/integrations/client/gc_client_test.go +++ b/tests/integrations/client/gc_client_test.go @@ -153,12 +153,14 @@ func (suite *gcClientTestSuite) testClientWatchWithRevision(fromNewRevision bool watchChan, err := suite.client.WatchGCSafePointV2(suite.server.Context(), startRevision) suite.NoError(err) - timeout := time.After(time.Second) - + timer := time.NewTimer(time.Second) + defer timer.Stop() isFirstUpdate := true + runTest := false for { select { - case <-timeout: + case <-timer.C: + suite.True(runTest) return case res := <-watchChan: for _, r := range res { @@ -174,6 +176,7 @@ func (suite *gcClientTestSuite) testClientWatchWithRevision(fromNewRevision bool continue } } + runTest = true } } } diff --git a/tests/integrations/client/global_config_test.go b/tests/integrations/client/global_config_test.go index 15034d035a6..6384adbd8f1 100644 --- a/tests/integrations/client/global_config_test.go +++ b/tests/integrations/client/global_config_test.go @@ -278,19 +278,20 @@ func (suite *globalConfigTestSuite) TestClientStore() { } func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { + ctx := suite.server.Context() defer func() { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath("test")) + _, err := suite.server.GetClient().Delete(ctx, suite.GetEtcdPath("test")) suite.NoError(err) for i := 3; i < 9; i++ { - _, err := suite.server.GetClient().Delete(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i))) + _, err := suite.server.GetClient().Delete(ctx, suite.GetEtcdPath(strconv.Itoa(i))) suite.NoError(err) } }() // Mock get revision by loading - r, err := suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath("test"), "test") + r, err := suite.server.GetClient().Put(ctx, suite.GetEtcdPath("test"), "test") suite.NoError(err) - res, revision, err := suite.client.LoadGlobalConfig(suite.server.Context(), nil, globalConfigPath) + res, revision, err := suite.client.LoadGlobalConfig(ctx, nil, globalConfigPath) suite.NoError(err) suite.Len(res, 1) suite.LessOrEqual(r.Header.GetRevision(), revision) @@ -313,14 +314,19 @@ func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { _, err = suite.server.GetClient().Put(suite.server.Context(), suite.GetEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) suite.NoError(err) } + timer := time.NewTimer(time.Second) + defer timer.Stop() + runTest := false for { select { - case <-time.After(time.Second): + case <-timer.C: + suite.True(runTest) return case res := <-configChan: for _, r := range res { suite.Equal(suite.GetEtcdPath(r.Value), r.Name) } + runTest = true } } } diff --git a/tests/integrations/client/go.mod b/tests/integrations/client/go.mod index a308c176eb6..d9e37dc9d16 100644 --- a/tests/integrations/client/go.mod 
+++ b/tests/integrations/client/go.mod @@ -1,6 +1,6 @@ module github.com/tikv/pd/tests/integrations/client -go 1.20 +go 1.21 replace ( github.com/tikv/pd => ../../../ diff --git a/tests/integrations/client/go.sum b/tests/integrations/client/go.sum index 3f4906e6848..5e8061733aa 100644 --- a/tests/integrations/client/go.sum +++ b/tests/integrations/client/go.sum @@ -65,6 +65,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch h1:KLE/YeX+9FNaGVW5MtImRVPhjDpfpgJhvkuYWBmOYbo= github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch/go.mod h1:KjBLriHXe7L6fGceqWzTod8HUB/TP1WWDtfuSYtYXaI= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= @@ -183,11 +184,14 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -257,14 +261,23 @@ github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1: github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= +github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= 
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= +github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= +github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= +github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= github.com/jarcoal/httpmock v1.0.8 h1:8kI16SoO6LQKgPE7PvQuV+YuD/inwHd7fOOe2zMbo4k= +github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= @@ -333,6 +346,7 @@ github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= +github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus= github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -359,6 +373,7 @@ github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= @@ -506,6 +521,7 @@ github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/gjson v1.9.3 h1:hqzS9wAHMO+KVBBkLxYdkEeeFHuqr95GfClRLKlgK0E= +github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= @@ 
-614,6 +630,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -810,9 +827,11 @@ gorm.io/datatypes v1.1.0/go.mod h1:SH2K9R+2RMjuX1CkCONrPwoe9JzVv2hkQvEu4bXGojE= gorm.io/driver/mysql v1.4.5 h1:u1lytId4+o9dDaNcPCFzNv7h6wvmc92UjNk3z8enSBU= gorm.io/driver/mysql v1.4.5/go.mod h1:SxzItlnT1cb6e1e4ZRpgJN2VYtcqJgqnHxWr4wsP8oc= gorm.io/driver/postgres v1.4.5 h1:mTeXTTtHAgnS9PgmhN2YeUbazYpLhUI1doLnw42XUZc= +gorm.io/driver/postgres v1.4.5/go.mod h1:GKNQYSJ14qvWkvPwXljMGehpKrhlDNsqYRr5HnYGncg= gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU= gorm.io/driver/sqlite v1.4.3/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI= gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0= +gorm.io/driver/sqlserver v1.4.1/go.mod h1:DJ4P+MeZbc5rvY58PnmN1Lnyvb5gw5NPzGshHDnJLig= gorm.io/gorm v1.21.9/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= diff --git a/tests/integrations/mcs/go.mod b/tests/integrations/mcs/go.mod index 738c9f56a74..284810c5f1a 100644 --- a/tests/integrations/mcs/go.mod +++ b/tests/integrations/mcs/go.mod @@ -1,6 +1,6 @@ module github.com/tikv/pd/tests/integrations/mcs -go 1.20 +go 1.21 replace ( github.com/tikv/pd => ../../../ diff --git a/tests/integrations/mcs/go.sum b/tests/integrations/mcs/go.sum index 225d23d4e92..b30af0f68fc 100644 --- a/tests/integrations/mcs/go.sum +++ b/tests/integrations/mcs/go.sum @@ -65,6 +65,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch h1:KLE/YeX+9FNaGVW5MtImRVPhjDpfpgJhvkuYWBmOYbo= github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch/go.mod h1:KjBLriHXe7L6fGceqWzTod8HUB/TP1WWDtfuSYtYXaI= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= @@ -125,6 +126,7 @@ github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g= github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs= @@ -186,11 +188,14 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -259,14 +264,23 @@ github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1: github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= +github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= +github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= +github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= +github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= github.com/jarcoal/httpmock v1.0.8 
h1:8kI16SoO6LQKgPE7PvQuV+YuD/inwHd7fOOe2zMbo4k= +github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= @@ -335,6 +349,7 @@ github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= +github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus= github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -353,6 +368,7 @@ github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 h1:BvoENQQU+fZ9uukda/R github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= @@ -360,8 +376,10 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= @@ -378,6 +396,7 @@ github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36/go.mod h1:pxMtw7c github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 h1:rfD9v3+ppLPzoQBgZev0qYCpegrwyFx/BUpkApEiKdY= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.3.0 h1:IF6LC/4+b1KNwrMlr2rBTUrojFPMexXBcDWZSpNwxjg= github.com/pingcap/errcode v0.3.0/go.mod 
h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -506,6 +525,7 @@ github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/gjson v1.9.3 h1:hqzS9wAHMO+KVBBkLxYdkEeeFHuqr95GfClRLKlgK0E= +github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= @@ -614,6 +634,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -808,9 +829,11 @@ gorm.io/datatypes v1.1.0/go.mod h1:SH2K9R+2RMjuX1CkCONrPwoe9JzVv2hkQvEu4bXGojE= gorm.io/driver/mysql v1.4.5 h1:u1lytId4+o9dDaNcPCFzNv7h6wvmc92UjNk3z8enSBU= gorm.io/driver/mysql v1.4.5/go.mod h1:SxzItlnT1cb6e1e4ZRpgJN2VYtcqJgqnHxWr4wsP8oc= gorm.io/driver/postgres v1.4.5 h1:mTeXTTtHAgnS9PgmhN2YeUbazYpLhUI1doLnw42XUZc= +gorm.io/driver/postgres v1.4.5/go.mod h1:GKNQYSJ14qvWkvPwXljMGehpKrhlDNsqYRr5HnYGncg= gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU= gorm.io/driver/sqlite v1.4.3/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI= gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0= +gorm.io/driver/sqlserver v1.4.1/go.mod h1:DJ4P+MeZbc5rvY58PnmN1Lnyvb5gw5NPzGshHDnJLig= gorm.io/gorm v1.21.9/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= diff --git a/tests/integrations/mcs/scheduling/config_test.go b/tests/integrations/mcs/scheduling/config_test.go index 29f99c66f4c..e2f2eeacfd1 100644 --- a/tests/integrations/mcs/scheduling/config_test.go +++ b/tests/integrations/mcs/scheduling/config_test.go @@ -81,17 +81,31 @@ func (suite *configTestSuite) TestConfigWatch() { re.Equal(sc.DefaultSplitMergeInterval, watcher.GetScheduleConfig().SplitMergeInterval.Duration) re.Equal("0.0.0", watcher.GetClusterVersion().String()) // Update the config and check if the scheduling config watcher can get the latest value. 
- suite.pdLeaderServer.GetPersistOptions().SetMaxReplicas(5) + persistOpts := suite.pdLeaderServer.GetPersistOptions() + persistOpts.SetMaxReplicas(5) persistConfig(re, suite.pdLeaderServer) testutil.Eventually(re, func() bool { return watcher.GetReplicationConfig().MaxReplicas == 5 }) - suite.pdLeaderServer.GetPersistOptions().SetSplitMergeInterval(2 * sc.DefaultSplitMergeInterval) + persistOpts.SetSplitMergeInterval(2 * sc.DefaultSplitMergeInterval) persistConfig(re, suite.pdLeaderServer) testutil.Eventually(re, func() bool { return watcher.GetScheduleConfig().SplitMergeInterval.Duration == 2*sc.DefaultSplitMergeInterval }) - suite.pdLeaderServer.GetPersistOptions().SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + persistOpts.SetStoreConfig(&sc.StoreConfig{ + Coprocessor: sc.Coprocessor{ + RegionMaxSize: "144MiB", + }, + Storage: sc.Storage{ + Engine: sc.RaftstoreV2, + }, + }) + persistConfig(re, suite.pdLeaderServer) + testutil.Eventually(re, func() bool { + return watcher.GetStoreConfig().GetRegionMaxSize() == 144 && + watcher.GetStoreConfig().IsRaftKV2() + }) + persistOpts.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) persistConfig(re, suite.pdLeaderServer) testutil.Eventually(re, func() bool { return watcher.GetClusterVersion().String() == "4.0.0" diff --git a/tests/integrations/mcs/scheduling/rule_test.go b/tests/integrations/mcs/scheduling/rule_test.go new file mode 100644 index 00000000000..bdffb6b2bb9 --- /dev/null +++ b/tests/integrations/mcs/scheduling/rule_test.go @@ -0,0 +1,238 @@ +// Copyright 2023 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduling + +import ( + "context" + "sort" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/tikv/pd/pkg/keyspace" + "github.com/tikv/pd/pkg/mcs/scheduling/server/rule" + "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/schedule/labeler" + "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/storage/endpoint" + "github.com/tikv/pd/pkg/utils/testutil" + "github.com/tikv/pd/tests" +) + +type ruleTestSuite struct { + suite.Suite + + ctx context.Context + cancel context.CancelFunc + + // The PD cluster. + cluster *tests.TestCluster + // pdLeaderServer is the leader server of the PD cluster. 
+ pdLeaderServer *tests.TestServer +} + +func TestRule(t *testing.T) { + suite.Run(t, &ruleTestSuite{}) +} + +func (suite *ruleTestSuite) SetupSuite() { + re := suite.Require() + + var err error + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) + re.NoError(err) + err = suite.cluster.RunInitialServers() + re.NoError(err) + leaderName := suite.cluster.WaitLeader() + suite.pdLeaderServer = suite.cluster.GetServer(leaderName) + re.NoError(suite.pdLeaderServer.BootstrapCluster()) +} + +func (suite *ruleTestSuite) TearDownSuite() { + suite.cancel() + suite.cluster.Destroy() +} + +func loadRules(re *require.Assertions, ruleStorage endpoint.RuleStorage) (rules []*placement.Rule) { + err := ruleStorage.LoadRules(func(_, v string) { + r, err := placement.NewRuleFromJSON([]byte(v)) + re.NoError(err) + rules = append(rules, r) + }) + re.NoError(err) + return +} + +func loadRuleGroups(re *require.Assertions, ruleStorage endpoint.RuleStorage) (groups []*placement.RuleGroup) { + err := ruleStorage.LoadRuleGroups(func(_, v string) { + rg, err := placement.NewRuleGroupFromJSON([]byte(v)) + re.NoError(err) + groups = append(groups, rg) + }) + re.NoError(err) + return +} + +func loadRegionRules(re *require.Assertions, ruleStorage endpoint.RuleStorage) (rules []*labeler.LabelRule) { + err := ruleStorage.LoadRegionRules(func(_, v string) { + lr, err := labeler.NewLabelRuleFromJSON([]byte(v)) + re.NoError(err) + rules = append(rules, lr) + }) + re.NoError(err) + return +} + +func (suite *ruleTestSuite) TestRuleWatch() { + re := suite.Require() + + // Create a rule watcher. + clusterID := suite.cluster.GetCluster().GetId() + watcher, err := rule.NewWatcher( + suite.ctx, + suite.pdLeaderServer.GetEtcdClient(), + endpoint.RulesPath(clusterID), + endpoint.RuleGroupPath(clusterID), + endpoint.RegionLabelPath(clusterID), + ) + re.NoError(err) + ruleStorage := watcher.GetRuleStorage() + // Check the default rule. + rules := loadRules(re, ruleStorage) + re.Len(rules, 1) + re.Equal("pd", rules[0].GroupID) + re.Equal("default", rules[0].ID) + re.Equal(0, rules[0].Index) + re.Empty(rules[0].StartKey) + re.Empty(rules[0].EndKey) + re.Equal(placement.Voter, rules[0].Role) + re.Empty(rules[0].LocationLabels) + // Check the empty rule group. + ruleGroups := loadRuleGroups(re, ruleStorage) + re.NoError(err) + re.Empty(ruleGroups) + // Set a new rule via the PD API server. + ruleManager := suite.pdLeaderServer.GetRaftCluster().GetRuleManager() + rule := &placement.Rule{ + GroupID: "2", + ID: "3", + Role: "voter", + Count: 1, + StartKeyHex: "22", + EndKeyHex: "dd", + } + err = ruleManager.SetRule(rule) + re.NoError(err) + testutil.Eventually(re, func() bool { + rules = loadRules(re, ruleStorage) + return len(rules) == 2 + }) + sort.Slice(rules, func(i, j int) bool { + return rules[i].ID > rules[j].ID + }) + re.Len(rules, 2) + re.Equal(rule.GroupID, rules[1].GroupID) + re.Equal(rule.ID, rules[1].ID) + re.Equal(rule.Role, rules[1].Role) + re.Equal(rule.Count, rules[1].Count) + re.Equal(rule.StartKeyHex, rules[1].StartKeyHex) + re.Equal(rule.EndKeyHex, rules[1].EndKeyHex) + // Delete the rule. + err = ruleManager.DeleteRule(rule.GroupID, rule.ID) + re.NoError(err) + testutil.Eventually(re, func() bool { + rules = loadRules(re, ruleStorage) + return len(rules) == 1 + }) + re.Len(rules, 1) + re.Equal("pd", rules[0].GroupID) + // Create a new rule group. 
+ ruleGroup := &placement.RuleGroup{ + ID: "2", + Index: 100, + Override: true, + } + err = ruleManager.SetRuleGroup(ruleGroup) + re.NoError(err) + testutil.Eventually(re, func() bool { + ruleGroups = loadRuleGroups(re, ruleStorage) + return len(ruleGroups) == 1 + }) + re.Len(ruleGroups, 1) + re.Equal(ruleGroup.ID, ruleGroups[0].ID) + re.Equal(ruleGroup.Index, ruleGroups[0].Index) + re.Equal(ruleGroup.Override, ruleGroups[0].Override) + // Delete the rule group. + err = ruleManager.DeleteRuleGroup(ruleGroup.ID) + re.NoError(err) + testutil.Eventually(re, func() bool { + ruleGroups = loadRuleGroups(re, ruleStorage) + return len(ruleGroups) == 0 + }) + re.Empty(ruleGroups) + + // Test the region label rule watch. + labelRules := loadRegionRules(re, ruleStorage) + re.Len(labelRules, 1) + defaultKeyspaceRule := keyspace.MakeLabelRule(utils.DefaultKeyspaceID) + re.Equal(defaultKeyspaceRule, labelRules[0]) + // Set a new region label rule. + labelRule := &labeler.LabelRule{ + ID: "rule1", + Labels: []labeler.RegionLabel{{Key: "k1", Value: "v1"}}, + RuleType: "key-range", + Data: labeler.MakeKeyRanges("1234", "5678"), + } + regionLabeler := suite.pdLeaderServer.GetRaftCluster().GetRegionLabeler() + err = regionLabeler.SetLabelRule(labelRule) + re.NoError(err) + testutil.Eventually(re, func() bool { + labelRules = loadRegionRules(re, ruleStorage) + return len(labelRules) == 2 + }) + sort.Slice(labelRules, func(i, j int) bool { + return labelRules[i].ID < labelRules[j].ID + }) + re.Len(labelRules, 2) + re.Equal(labelRule.ID, labelRules[1].ID) + re.Equal(labelRule.Labels, labelRules[1].Labels) + re.Equal(labelRule.RuleType, labelRules[1].RuleType) + // Patch the region label rule. + labelRule = &labeler.LabelRule{ + ID: "rule2", + Labels: []labeler.RegionLabel{{Key: "k2", Value: "v2"}}, + RuleType: "key-range", + Data: labeler.MakeKeyRanges("ab12", "cd12"), + } + patch := labeler.LabelRulePatch{ + SetRules: []*labeler.LabelRule{labelRule}, + DeleteRules: []string{"rule1"}, + } + err = regionLabeler.Patch(patch) + re.NoError(err) + testutil.Eventually(re, func() bool { + labelRules = loadRegionRules(re, ruleStorage) + return len(labelRules) == 2 + }) + sort.Slice(labelRules, func(i, j int) bool { + return labelRules[i].ID < labelRules[j].ID + }) + re.Len(labelRules, 2) + re.Equal(defaultKeyspaceRule, labelRules[0]) + re.Equal(labelRule.ID, labelRules[1].ID) + re.Equal(labelRule.Labels, labelRules[1].Labels) + re.Equal(labelRule.RuleType, labelRules[1].RuleType) +} diff --git a/tests/integrations/tso/go.mod b/tests/integrations/tso/go.mod index c76efe9271b..d559e573ec7 100644 --- a/tests/integrations/tso/go.mod +++ b/tests/integrations/tso/go.mod @@ -1,6 +1,6 @@ module github.com/tikv/pd/tests/integrations/tso -go 1.20 +go 1.21 replace ( github.com/tikv/pd => ../../../ diff --git a/tests/integrations/tso/go.sum b/tests/integrations/tso/go.sum index b04353eb0b6..0503d79d066 100644 --- a/tests/integrations/tso/go.sum +++ b/tests/integrations/tso/go.sum @@ -65,6 +65,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch 
h1:KLE/YeX+9FNaGVW5MtImRVPhjDpfpgJhvkuYWBmOYbo= github.com/breeswish/gin-jwt/v2 v2.6.4-jwt-patch/go.mod h1:KjBLriHXe7L6fGceqWzTod8HUB/TP1WWDtfuSYtYXaI= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= @@ -183,11 +184,14 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -257,14 +261,23 @@ github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1: github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= +github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= +github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= +github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= +github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= 
github.com/jarcoal/httpmock v1.0.8 h1:8kI16SoO6LQKgPE7PvQuV+YuD/inwHd7fOOe2zMbo4k= +github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= @@ -333,6 +346,7 @@ github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= +github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus= github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -359,6 +373,7 @@ github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= @@ -375,6 +390,7 @@ github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36/go.mod h1:pxMtw7c github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 h1:rfD9v3+ppLPzoQBgZev0qYCpegrwyFx/BUpkApEiKdY= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.3.0 h1:IF6LC/4+b1KNwrMlr2rBTUrojFPMexXBcDWZSpNwxjg= github.com/pingcap/errcode v0.3.0/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -504,6 +520,7 @@ github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/gjson v1.9.3 h1:hqzS9wAHMO+KVBBkLxYdkEeeFHuqr95GfClRLKlgK0E= +github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= @@ -612,6 +629,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -807,9 +825,11 @@ gorm.io/datatypes v1.1.0/go.mod h1:SH2K9R+2RMjuX1CkCONrPwoe9JzVv2hkQvEu4bXGojE= gorm.io/driver/mysql v1.4.5 h1:u1lytId4+o9dDaNcPCFzNv7h6wvmc92UjNk3z8enSBU= gorm.io/driver/mysql v1.4.5/go.mod h1:SxzItlnT1cb6e1e4ZRpgJN2VYtcqJgqnHxWr4wsP8oc= gorm.io/driver/postgres v1.4.5 h1:mTeXTTtHAgnS9PgmhN2YeUbazYpLhUI1doLnw42XUZc= +gorm.io/driver/postgres v1.4.5/go.mod h1:GKNQYSJ14qvWkvPwXljMGehpKrhlDNsqYRr5HnYGncg= gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU= gorm.io/driver/sqlite v1.4.3/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI= gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0= +gorm.io/driver/sqlserver v1.4.1/go.mod h1:DJ4P+MeZbc5rvY58PnmN1Lnyvb5gw5NPzGshHDnJLig= gorm.io/gorm v1.21.9/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= diff --git a/tests/pdctl/hot/hot_test.go b/tests/pdctl/hot/hot_test.go index 2dfa89acb52..352b891c092 100644 --- a/tests/pdctl/hot/hot_test.go +++ b/tests/pdctl/hot/hot_test.go @@ -27,6 +27,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/utils/typeutil" @@ -82,9 +83,9 @@ func TestHot(t *testing.T) { newStats.KeysWritten = keysWritten newStats.KeysRead = keysRead rc := leaderServer.GetRaftCluster() - for i := statistics.DefaultWriteMfSize; i > 0; i-- { - start := uint64(now - statistics.StoreHeartBeatReportInterval*int64(i)) - end := start + statistics.StoreHeartBeatReportInterval + for i := utils.DefaultWriteMfSize; i > 0; i-- { + start := uint64(now - utils.StoreHeartBeatReportInterval*int64(i)) + end := start + utils.StoreHeartBeatReportInterval newStats.Interval = &pdpb.TimeInterval{StartTimestamp: start, EndTimestamp: end} rc.GetStoresStats().Observe(ss.GetID(), newStats) } @@ -98,10 +99,10 @@ func TestHot(t *testing.T) { re.NoError(err) hotStores := api.HotStoreStats{} re.NoError(json.Unmarshal(output, &hotStores)) - re.Equal(float64(bytesWritten)/statistics.StoreHeartBeatReportInterval, hotStores.BytesWriteStats[1]) - re.Equal(float64(bytesRead)/statistics.StoreHeartBeatReportInterval, hotStores.BytesReadStats[1]) - re.Equal(float64(keysWritten)/statistics.StoreHeartBeatReportInterval, hotStores.KeysWriteStats[1]) - re.Equal(float64(keysRead)/statistics.StoreHeartBeatReportInterval, hotStores.KeysReadStats[1]) + re.Equal(float64(bytesWritten)/utils.StoreHeartBeatReportInterval, hotStores.BytesWriteStats[1]) + re.Equal(float64(bytesRead)/utils.StoreHeartBeatReportInterval, hotStores.BytesReadStats[1]) + re.Equal(float64(keysWritten)/utils.StoreHeartBeatReportInterval, 
hotStores.KeysWriteStats[1]) + re.Equal(float64(keysRead)/utils.StoreHeartBeatReportInterval, hotStores.KeysReadStats[1]) re.Equal(float64(bytesWritten), hotStores.BytesWriteStats[2]) re.Equal(float64(keysWritten), hotStores.KeysWriteStats[2]) @@ -133,12 +134,12 @@ func TestHot(t *testing.T) { switch hotType { case "read": loads := []float64{ - statistics.RegionReadBytes: float64(1000000000 * reportInterval), - statistics.RegionReadKeys: float64(1000000000 * reportInterval), - statistics.RegionReadQueryNum: float64(1000000000 * reportInterval), - statistics.RegionWriteBytes: 0, - statistics.RegionWriteKeys: 0, - statistics.RegionWriteQueryNum: 0, + utils.RegionReadBytes: float64(1000000000 * reportInterval), + utils.RegionReadKeys: float64(1000000000 * reportInterval), + utils.RegionReadQueryNum: float64(1000000000 * reportInterval), + utils.RegionWriteBytes: 0, + utils.RegionWriteKeys: 0, + utils.RegionWriteQueryNum: 0, } leader := &metapb.Peer{ Id: 100 + regionIDCounter, @@ -150,10 +151,10 @@ func TestHot(t *testing.T) { }, leader) rc.GetHotStat().CheckReadAsync(statistics.NewCheckPeerTask(peerInfo, region)) testutil.Eventually(re, func() bool { - hotPeerStat := rc.GetHotPeerStat(statistics.Read, hotRegionID, hotStoreID) + hotPeerStat := rc.GetHotPeerStat(utils.Read, hotRegionID, hotStoreID) return hotPeerStat != nil }) - if reportInterval >= statistics.ReadReportInterval { + if reportInterval >= utils.StoreHeartBeatReportInterval { count++ } testHot(hotRegionID, hotStoreID, "read") @@ -164,10 +165,10 @@ func TestHot(t *testing.T) { []byte("c"), []byte("d"), core.SetWrittenBytes(1000000000*reportInterval), core.SetReportInterval(0, reportInterval)) testutil.Eventually(re, func() bool { - hotPeerStat := rc.GetHotPeerStat(statistics.Write, hotRegionID, hotStoreID) + hotPeerStat := rc.GetHotPeerStat(utils.Write, hotRegionID, hotStoreID) return hotPeerStat != nil }) - if reportInterval >= statistics.WriteReportInterval { + if reportInterval >= utils.RegionHeartBeatReportInterval { count++ } testHot(hotRegionID, hotStoreID, "write") @@ -177,20 +178,20 @@ func TestHot(t *testing.T) { reportIntervals := []uint64{ statistics.HotRegionReportMinInterval, statistics.HotRegionReportMinInterval + 1, - statistics.WriteReportInterval, - statistics.WriteReportInterval + 1, - statistics.WriteReportInterval * 2, - statistics.WriteReportInterval*2 + 1, + utils.RegionHeartBeatReportInterval, + utils.RegionHeartBeatReportInterval + 1, + utils.RegionHeartBeatReportInterval * 2, + utils.RegionHeartBeatReportInterval*2 + 1, } testCommand(reportIntervals, "write") count = 0 reportIntervals = []uint64{ statistics.HotRegionReportMinInterval, statistics.HotRegionReportMinInterval + 1, - statistics.ReadReportInterval, - statistics.ReadReportInterval + 1, - statistics.ReadReportInterval * 2, - statistics.ReadReportInterval*2 + 1, + utils.StoreHeartBeatReportInterval, + utils.StoreHeartBeatReportInterval + 1, + utils.StoreHeartBeatReportInterval * 2, + utils.StoreHeartBeatReportInterval*2 + 1, } testCommand(reportIntervals, "read") } @@ -228,15 +229,15 @@ func TestHotWithStoreID(t *testing.T) { } defer cluster.Destroy() - pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(0, statistics.WriteReportInterval)) - pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(0, statistics.WriteReportInterval)) - pdctl.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f"), 
core.SetWrittenBytes(9000000000), core.SetReportInterval(0, statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(0, utils.RegionHeartBeatReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), core.SetReportInterval(0, utils.RegionHeartBeatReportInterval)) + pdctl.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f"), core.SetWrittenBytes(9000000000), core.SetReportInterval(0, utils.RegionHeartBeatReportInterval)) // wait hot scheduler starts rc := leaderServer.GetRaftCluster() testutil.Eventually(re, func() bool { - return rc.GetHotPeerStat(statistics.Write, 1, 1) != nil && - rc.GetHotPeerStat(statistics.Write, 2, 2) != nil && - rc.GetHotPeerStat(statistics.Write, 3, 1) != nil + return rc.GetHotPeerStat(utils.Write, 1, 1) != nil && + rc.GetHotPeerStat(utils.Write, 2, 2) != nil && + rc.GetHotPeerStat(utils.Write, 3, 1) != nil }) args := []string{"-u", pdAddr, "hot", "write", "1"} output, err := pdctl.ExecuteCommand(cmd, args...) @@ -337,13 +338,13 @@ func TestHistoryHotRegions(t *testing.T) { defer cluster.Destroy() startTime := time.Now().Unix() pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), - core.SetReportInterval(uint64(startTime-statistics.RegionHeartBeatReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), - core.SetReportInterval(uint64(startTime-statistics.RegionHeartBeatReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) pdctl.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f"), core.SetWrittenBytes(9000000000), - core.SetReportInterval(uint64(startTime-statistics.RegionHeartBeatReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) pdctl.MustPutRegion(re, cluster, 4, 3, []byte("g"), []byte("h"), core.SetWrittenBytes(9000000000), - core.SetReportInterval(uint64(startTime-statistics.RegionHeartBeatReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) // wait hot scheduler starts testutil.Eventually(re, func() bool { hotRegionStorage := leaderServer.GetServer().GetHistoryHotRegionStorage() @@ -451,15 +452,15 @@ func TestHotWithoutHotPeer(t *testing.T) { err := leaderServer.GetServer().GetRaftCluster().HandleStoreHeartbeat(&pdpb.StoreHeartbeatRequest{ Stats: &pdpb.StoreStats{ StoreId: store.Id, - BytesRead: uint64(load * statistics.StoreHeartBeatReportInterval), - KeysRead: uint64(load * statistics.StoreHeartBeatReportInterval), - BytesWritten: uint64(load * statistics.StoreHeartBeatReportInterval), - KeysWritten: uint64(load * statistics.StoreHeartBeatReportInterval), + BytesRead: uint64(load * utils.StoreHeartBeatReportInterval), + KeysRead: uint64(load * utils.StoreHeartBeatReportInterval), + BytesWritten: uint64(load * utils.StoreHeartBeatReportInterval), + KeysWritten: uint64(load * utils.StoreHeartBeatReportInterval), Capacity: 1000 * units.MiB, Available: 1000 * units.MiB, Interval: &pdpb.TimeInterval{ - StartTimestamp: timestamp + uint64(i*statistics.StoreHeartBeatReportInterval), - EndTimestamp: timestamp + 
uint64((i+1)*statistics.StoreHeartBeatReportInterval)},
+				StartTimestamp: timestamp + uint64(i*utils.StoreHeartBeatReportInterval),
+				EndTimestamp:   timestamp + uint64((i+1)*utils.StoreHeartBeatReportInterval)},
 			},
 		}, &pdpb.StoreHeartbeatResponse{})
 		re.NoError(err)
diff --git a/tests/pdctl/keyspace/keyspace_group_test.go b/tests/pdctl/keyspace/keyspace_group_test.go
index 1d0c8132c13..105e860ad17 100644
--- a/tests/pdctl/keyspace/keyspace_group_test.go
+++ b/tests/pdctl/keyspace/keyspace_group_test.go
@@ -561,8 +561,13 @@ func TestShowKeyspaceGroupPrimary(t *testing.T) {
 		args := []string{"-u", pdAddr, "keyspace-group"}
 		output, err := pdctl.ExecuteCommand(cmd, append(args, "1")...)
 		re.NoError(err)
+		if strings.Contains(string(output), "Failed") {
+			// It may fail when an error occurs, such as [PD:etcd:ErrEtcdTxnConflict]etcd transaction failed, conflicted and rolled back
+			re.Contains(string(output), "ErrEtcdTxnConflict", "output: %s", string(output))
+			return false
+		}
 		err = json.Unmarshal(output, &keyspaceGroup)
-		re.NoErrorf(err, "output: %s", string(output))
+		re.NoError(err)
 		return len(keyspaceGroup.Members) == 2
 	})
 	for _, member := range keyspaceGroup.Members {
@@ -583,3 +588,49 @@ func TestShowKeyspaceGroupPrimary(t *testing.T) {
 	re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/tso/fastGroupSplitPatroller"))
 	re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayStartServerLoop"))
 }
+
+func TestInPDMode(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	tc, err := tests.NewTestCluster(ctx, 1)
+	re.NoError(err)
+	err = tc.RunInitialServers()
+	re.NoError(err)
+	pdAddr := tc.GetConfig().GetClientURL()
+	cmd := pdctlCmd.GetRootCmd()
+	tc.WaitLeader()
+	leaderServer := tc.GetServer(tc.GetLeader())
+	re.NoError(leaderServer.BootstrapCluster())
+
+	argsList := [][]string{
+		{"-u", pdAddr, "keyspace-group"},
+		{"-u", pdAddr, "keyspace-group", "0"},
+		{"-u", pdAddr, "keyspace-group", "split", "0", "1", "2"},
+		{"-u", pdAddr, "keyspace-group", "split-range", "1", "2", "3", "4"},
+		{"-u", pdAddr, "keyspace-group", "finish-split", "1"},
+		{"-u", pdAddr, "keyspace-group", "merge", "1", "2"},
+		{"-u", pdAddr, "keyspace-group", "merge", "0", "--all"},
+		{"-u", pdAddr, "keyspace-group", "finish-merge", "1"},
+		{"-u", pdAddr, "keyspace-group", "set-node", "0", "http://127.0.0.1:2379"},
+		{"-u", pdAddr, "keyspace-group", "set-priority", "0", "http://127.0.0.1:2379", "200"},
+		{"-u", pdAddr, "keyspace-group", "primary", "0"},
+	}
+	for _, args := range argsList {
+		output, err := pdctl.ExecuteCommand(cmd, args...)
+		re.NoError(err)
+		re.Contains(string(output), "Failed",
+			"args: %v, output: %v", args, string(output))
+		re.Contains(string(output), "keyspace group manager is not initialized",
+			"args: %v, output: %v", args, string(output))
+	}
+
+	leaderServer.SetKeyspaceManager(nil)
+	args := []string{"-u", pdAddr, "keyspace-group", "split", "0", "1", "2"}
+	output, err := pdctl.ExecuteCommand(cmd, args...)
+ re.NoError(err) + re.Contains(string(output), "Failed", + "args: %v, output: %v", args, string(output)) + re.Contains(string(output), "keyspace manager is not initialized", + "args: %v, output: %v", args, string(output)) +} diff --git a/tests/pdctl/store/store_test.go b/tests/pdctl/store/store_test.go index 42ceaede428..0ac68e35d98 100644 --- a/tests/pdctl/store/store_test.go +++ b/tests/pdctl/store/store_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/storelimit" - "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/server/api" "github.com/tikv/pd/tests" "github.com/tikv/pd/tests/pdctl" @@ -513,8 +513,8 @@ func TestTombstoneStore(t *testing.T) { pdctl.MustPutStore(re, leaderServer.GetServer(), store.Store.Store) } defer cluster.Destroy() - pdctl.MustPutRegion(re, cluster, 1, 2, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(0, statistics.WriteReportInterval)) - pdctl.MustPutRegion(re, cluster, 2, 3, []byte("b"), []byte("c"), core.SetWrittenBytes(3000000000), core.SetReportInterval(0, statistics.WriteReportInterval)) + pdctl.MustPutRegion(re, cluster, 1, 2, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), core.SetReportInterval(0, utils.RegionHeartBeatReportInterval)) + pdctl.MustPutRegion(re, cluster, 2, 3, []byte("b"), []byte("c"), core.SetWrittenBytes(3000000000), core.SetReportInterval(0, utils.RegionHeartBeatReportInterval)) // store remove-tombstone args := []string{"-u", pdAddr, "store", "remove-tombstone"} output, err := pdctl.ExecuteCommand(cmd, args...) diff --git a/tests/server/storage/hot_region_storage_test.go b/tests/server/storage/hot_region_storage_test.go index fee4944826c..21881802d7d 100644 --- a/tests/server/storage/hot_region_storage_test.go +++ b/tests/server/storage/hot_region_storage_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server/config" @@ -68,17 +69,17 @@ func TestHotRegionStorage(t *testing.T) { defer cluster.Destroy() startTime := time.Now().Unix() pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), - core.SetReportInterval(uint64(startTime-statistics.RegionHeartBeatReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), - core.SetReportInterval(uint64(startTime-statistics.RegionHeartBeatReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) pdctl.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f"), - core.SetReportInterval(uint64(startTime-statistics.RegionHeartBeatReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) pdctl.MustPutRegion(re, cluster, 4, 2, []byte("g"), []byte("h"), - core.SetReportInterval(uint64(startTime-statistics.RegionHeartBeatReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) storeStats := []*pdpb.StoreStats{ { StoreId: 1, - Interval: 
&pdpb.TimeInterval{StartTimestamp: uint64(startTime - statistics.StoreHeartBeatReportInterval), EndTimestamp: uint64(startTime)}, + Interval: &pdpb.TimeInterval{StartTimestamp: uint64(startTime - utils.StoreHeartBeatReportInterval), EndTimestamp: uint64(startTime)}, PeerStats: []*pdpb.PeerStat{ { RegionId: 3, @@ -88,7 +89,7 @@ func TestHotRegionStorage(t *testing.T) { }, { StoreId: 2, - Interval: &pdpb.TimeInterval{StartTimestamp: uint64(startTime - statistics.StoreHeartBeatReportInterval), EndTimestamp: uint64(startTime)}, + Interval: &pdpb.TimeInterval{StartTimestamp: uint64(startTime - utils.StoreHeartBeatReportInterval), EndTimestamp: uint64(startTime)}, PeerStats: []*pdpb.PeerStat{ { RegionId: 4, @@ -176,7 +177,7 @@ func TestHotRegionStorageReservedDayConfigChange(t *testing.T) { defer cluster.Destroy() startTime := time.Now().Unix() pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), - core.SetReportInterval(uint64(startTime-statistics.RegionHeartBeatReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) var iter storage.HotRegionStorageIterator var next *storage.HistoryHotRegion testutil.Eventually(re, func() bool { // wait for the history hot region to be written to the storage @@ -197,7 +198,7 @@ func TestHotRegionStorageReservedDayConfigChange(t *testing.T) { leaderServer.GetServer().SetScheduleConfig(schedule) time.Sleep(3 * interval) pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), - core.SetReportInterval(uint64(time.Now().Unix()-statistics.WriteReportInterval), uint64(time.Now().Unix()))) + core.SetReportInterval(uint64(time.Now().Unix()-utils.RegionHeartBeatReportInterval), uint64(time.Now().Unix()))) time.Sleep(10 * interval) hotRegionStorage := leaderServer.GetServer().GetHistoryHotRegionStorage() iter = hotRegionStorage.NewIterator([]string{storage.WriteType.String()}, startTime*1000, time.Now().UnixMilli()) @@ -269,7 +270,7 @@ func TestHotRegionStorageWriteIntervalConfigChange(t *testing.T) { startTime := time.Now().Unix() pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"), core.SetWrittenBytes(3000000000), - core.SetReportInterval(uint64(startTime-statistics.WriteReportInterval), uint64(startTime))) + core.SetReportInterval(uint64(startTime-utils.RegionHeartBeatReportInterval), uint64(startTime))) var iter storage.HotRegionStorageIterator var next *storage.HistoryHotRegion testutil.Eventually(re, func() bool { // wait for the history hot region to be written to the storage @@ -290,7 +291,7 @@ func TestHotRegionStorageWriteIntervalConfigChange(t *testing.T) { leaderServer.GetServer().SetScheduleConfig(schedule) time.Sleep(3 * interval) pdctl.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d"), core.SetWrittenBytes(6000000000), - core.SetReportInterval(uint64(time.Now().Unix()-statistics.WriteReportInterval), uint64(time.Now().Unix()))) + core.SetReportInterval(uint64(time.Now().Unix()-utils.RegionHeartBeatReportInterval), uint64(time.Now().Unix()))) time.Sleep(10 * interval) // it cant get new hot region because wait time smaller than hot region write interval hotRegionStorage := leaderServer.GetServer().GetHistoryHotRegionStorage() diff --git a/tests/server/tso/allocator_test.go b/tests/server/tso/allocator_test.go index 8b1ad3dc7d1..41f544729c2 100644 --- a/tests/server/tso/allocator_test.go +++ b/tests/server/tso/allocator_test.go @@ -215,7 +215,7 @@ func testTSOSuffix(re 
*require.Assertions, cluster *tests.TestCluster, am *tso.A re.NoError(err) var tso pdpb.Timestamp testutil.Eventually(re, func() bool { - tso, err = allocator.GenerateTSO(1) + tso, err = allocator.GenerateTSO(context.Background(), 1) re.NoError(err) return tso.GetPhysical() != 0 }) diff --git a/tools/pd-ctl/pdctl/command/keyspace_group_command.go b/tools/pd-ctl/pdctl/command/keyspace_group_command.go index 6dea6d78dd6..08d5c875a18 100644 --- a/tools/pd-ctl/pdctl/command/keyspace_group_command.go +++ b/tools/pd-ctl/pdctl/command/keyspace_group_command.go @@ -242,7 +242,7 @@ func finishSplitKeyspaceGroupCommandFunc(cmd *cobra.Command, args []string) { } _, err = doRequest(cmd, fmt.Sprintf("%s/%s/split", keyspaceGroupsPrefix, args[0]), http.MethodDelete, http.Header{}) if err != nil { - cmd.Println(err) + cmd.Printf("Failed to finish split-keyspace-group: %s\n", err) return } cmd.Println("Success!") @@ -309,7 +309,7 @@ func finishMergeKeyspaceGroupCommandFunc(cmd *cobra.Command, args []string) { } _, err = doRequest(cmd, fmt.Sprintf("%s/%s/merge", keyspaceGroupsPrefix, args[0]), http.MethodDelete, http.Header{}) if err != nil { - cmd.Println(err) + cmd.Printf("Failed to finish merge-keyspace-group: %s\n", err) return } cmd.Println("Success!") diff --git a/tools/pd-ctl/pdctl/command/scheduler.go b/tools/pd-ctl/pdctl/command/scheduler.go index 93335723641..57658022858 100644 --- a/tools/pd-ctl/pdctl/command/scheduler.go +++ b/tools/pd-ctl/pdctl/command/scheduler.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/errors" "github.com/spf13/cobra" - "github.com/tikv/pd/pkg/statistics" + "github.com/tikv/pd/pkg/statistics/utils" ) var ( @@ -717,14 +717,14 @@ func postSchedulerConfigCommandFunc(cmd *cobra.Command, schedulerName string, ar priorities := make([]string, 0) prioritiesMap := make(map[string]struct{}) for _, priority := range strings.Split(value, ",") { - if priority != statistics.BytePriority && priority != statistics.KeyPriority && priority != statistics.QueryPriority { + if priority != utils.BytePriority && priority != utils.KeyPriority && priority != utils.QueryPriority { cmd.Println(fmt.Sprintf("priority should be one of [%s, %s, %s]", - statistics.BytePriority, - statistics.QueryPriority, - statistics.KeyPriority)) + utils.BytePriority, + utils.QueryPriority, + utils.KeyPriority)) return } - if priority == statistics.QueryPriority && key == "write-peer-priorities" { + if priority == utils.QueryPriority && key == "write-peer-priorities" { cmd.Println("query is not allowed to be set in priorities for write-peer-priorities") return } diff --git a/tools/pd-tso-bench/go.mod b/tools/pd-tso-bench/go.mod index 6c10dbb5fce..64a99a42d35 100644 --- a/tools/pd-tso-bench/go.mod +++ b/tools/pd-tso-bench/go.mod @@ -1,6 +1,6 @@ module github.com/tools/pd-tso-bench -go 1.16 +go 1.21 require ( github.com/influxdata/tdigest v0.0.1 @@ -12,4 +12,27 @@ require ( google.golang.org/grpc v1.54.0 ) +require ( + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 // indirect + github.com/pingcap/kvproto v0.0.0-20230727073445-53e1f8730c30 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common 
v0.26.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect +) + replace github.com/tikv/pd/client => ../../client diff --git a/tools/pd-tso-bench/go.sum b/tools/pd-tso-bench/go.sum index 1fa45d063ff..eb3dd6d95f2 100644 --- a/tools/pd-tso-bench/go.sum +++ b/tools/pd-tso-bench/go.sum @@ -1,617 +1,10 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= 
[go.sum hunks, condensed: the word-wrapping of this span destroyed the one-entry-per-line diff layout, and the underlying content is mechanical checksum churn. The hunks remove several hundred /go.mod checksum lines for transitive dependencies, chiefly cloud.google.com/go/* service modules plus a long tail of indirect github.com entries (googleapis/gax-go, grpc-ecosystem/grpc-gateway, envoyproxy, cncf/xds, golang/mock, google/pprof, and similar), along with stale older-version lines of modules that remain in use. Retained entries such as benbjohnson/clock, beorn7/perks, cespare/xxhash/v2, davecgh/go-spew, gogo/protobuf, golang/protobuf v1.5.3, google/go-cmp v0.5.9, influxdata/tdigest, and json-iterator/go appear as unchanged context lines. The second hunk header is @@ -619,245 +12,70 @@.]
v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= @@ -868,13 +86,10 @@ github.com/pingcap/kvproto v0.0.0-20230727073445-53e1f8730c30 h1:EvqKcDT7ceGLW0m github.com/pingcap/kvproto v0.0.0-20230727073445-53e1f8730c30/go.mod h1:r0q/CFcwvyeRhKtoqzmWMBebrtpIziQQ9vR+JKh1knc= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -884,7 +99,6 @@ github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -896,55 +110,20 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 
h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -962,660 +141,91 @@ go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 h1:QLureRX3moex6NVu/Lr4MGakp9FdA7sBHGBmvRW7NaM= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint 
v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca h1:PupagGYwj8+I4ubCxcmcBRk3VlUWtTg5huQpZR9flmE= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod 
h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= 
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= 
-google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= 
-google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= -google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1624,49 +234,3 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= -modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= -modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= -modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= -modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= -modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= -modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.13.1/go.mod 
h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=