diff --git a/go.mod b/go.mod index 5b4c32ddb214b..10d9df517dbfd 100644 --- a/go.mod +++ b/go.mod @@ -50,9 +50,9 @@ require ( github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 - github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb + github.com/grafana/dskit v0.0.0-20240305142548-5fcbd51bb6e4 github.com/grafana/go-gelf/v2 v2.0.1 - github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 + github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 @@ -290,7 +290,6 @@ require ( github.com/sercand/kuberesolver/v5 v5.1.1 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/soheilhy/cmux v0.1.5 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect diff --git a/go.sum b/go.sum index 744c904e823c2..3f0f67f89c7ed 100644 --- a/go.sum +++ b/go.sum @@ -993,14 +993,14 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= -github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb h1:AWE6+kvtE18HP+lRWNUCyvymyrFSXs6TcS2vXIXGIuw= -github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb/go.mod h1:kkWM4WUV230bNG3urVRWPBnSJHs64y/0RmWjftnnn0c= +github.com/grafana/dskit v0.0.0-20240305142548-5fcbd51bb6e4 h1:FoWNgb658QLY7CJTvnvZMKJxbT+xF471vr6TErGh1jI= +github.com/grafana/dskit v0.0.0-20240305142548-5fcbd51bb6e4/go.mod h1:RpTvZ9nkdXqyQro5DULQHJl9B6vwvEj95Dk6WIXqTLQ= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= -github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 h1:aLBiDMjTtXx2800iCIp+8kdjIlvGX0MF/zICQMQO2qU= -github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= +github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d h1:YwbJJ/PrVWVdnR+j/EAVuazdeP+Za5qbiH1Vlr+wFXs= github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= @@ -1659,8 +1659,6 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod 
h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index e6a8e27578c21..3595561ee201a 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -10,9 +10,9 @@ require ( github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2 + github.com/grafana/dskit v0.0.0-20240305142548-5fcbd51bb6e4 github.com/grafana/loki v1.6.2-0.20230216091802-4e4359e67c6c - github.com/prometheus/common v0.39.0 + github.com/prometheus/common v0.44.0 github.com/stretchr/testify v1.8.1 ) @@ -35,7 +35,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.4.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect @@ -44,7 +44,7 @@ require ( github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/status v1.1.1 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/btree v1.1.2 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 // indirect @@ -78,10 +78,10 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/exporter-toolkit v0.8.2 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/client_golang v1.15.1 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/prometheus/prometheus v0.41.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect @@ -99,19 +99,21 @@ require ( go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/crypto v0.17.0 // indirect - golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.8.0 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.4.0 // indirect - golang.org/x/sync v0.1.0 // indirect + golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.6.0 // indirect 
google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/grpc v1.52.3 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index bedf3022d5312..b4731be06a0f2 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -79,7 +79,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-lambda-go v1.26.0 h1:6ujqBpYF7tdZcBvPIccs98SpeGfrt/UOVEiexfNIdHA= github.com/aws/aws-lambda-go v1.26.0/go.mod h1:jJmlefzPfGnckuHdXX7/80O3BvUUi12XOkbv4w9SGLU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA= +github.com/aws/aws-sdk-go v1.44.321 h1:iXwFLxWjZPjYqjPq0EcCs46xX7oDLEELte1+BzgpKk8= github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo= github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= @@ -147,8 +147,9 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -238,8 +239,9 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 
h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -295,8 +297,8 @@ github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2 h1:IOks+FXJ6iO/pfbaVEf4efNw+YzYBYNCkCabyrbkFTM= -github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2/go.mod h1:zj+5BNZAVmQafV583uLTAOzRr963KPdEm4d6NPmtbwg= +github.com/grafana/dskit v0.0.0-20240305142548-5fcbd51bb6e4 h1:FoWNgb658QLY7CJTvnvZMKJxbT+xF471vr6TErGh1jI= +github.com/grafana/dskit v0.0.0-20240305142548-5fcbd51bb6e4/go.mod h1:RpTvZ9nkdXqyQro5DULQHJl9B6vwvEj95Dk6WIXqTLQ= github.com/grafana/loki v1.6.2-0.20230216091802-4e4359e67c6c h1:4JjETlwJs5VJgM5iLdcwksrZSBkwfqGT94kj8e3Y3tM= github.com/grafana/loki v1.6.2-0.20230216091802-4e4359e67c6c/go.mod h1:EmSFg/t1wTEcN9MU1nVq2R7y25pVcqOA+Qv7luldlIo= github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 h1:VXitROTlmZtLzvokNe8ZbUKpmwldM4Hy1zdNRO32jKU= @@ -386,7 +388,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -463,38 +465,40 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= 
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= -github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM= github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= +github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 h1:oHcfzdJnM/SFppy2aUlvomk37GI33x9vgJULihE5Dt8= +github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97/go.mod h1:LoBCZeRh+5hX+fSULNyFnagYlQG/gBsyA/deNzROkq8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/prometheus v0.41.0 h1:+QR4QpzwE54zsKk2K7EUkof3tHxa3b/fyw7xJ4jR1Ns= github.com/prometheus/prometheus v0.41.0/go.mod h1:Uu5817xm7ibU/VaDZ9pu1ssGzcpO9Bd+LyoZ76RpHyo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -594,8 +598,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= 
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 h1:OvjRkcNHnf6/W5FZXSxODbxwD+X7fspczG7Jn/xQVD4= -golang.org/x/exp v0.0.0-20221212164502-fae10dda9338/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -700,8 +704,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -714,8 +718,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1014,8 +1019,12 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= 
-google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1049,8 +1058,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11 google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1066,8 +1075,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/grafana/dskit/concurrency/runner.go b/vendor/github.com/grafana/dskit/concurrency/runner.go index 023be10d7a0a3..fcc8929971491 100644 --- a/vendor/github.com/grafana/dskit/concurrency/runner.go +++ 
b/vendor/github.com/grafana/dskit/concurrency/runner.go
@@ -83,11 +83,25 @@ func CreateJobsFromStrings(values []string) []interface{} {
 }
 
 // ForEachJob runs the provided jobFunc for each job index in [0, jobs) up to concurrency concurrent workers.
+// If the concurrency value is <= 0 all jobs will be executed in parallel.
+//
 // The execution breaks on first error encountered.
+//
+// ForEachJob cancels the context.Context passed to each invocation of jobFunc before ForEachJob returns.
 func ForEachJob(ctx context.Context, jobs int, concurrency int, jobFunc func(ctx context.Context, idx int) error) error {
 	if jobs == 0 {
 		return nil
 	}
+	if jobs == 1 {
+		// Honor the function contract, cancelling the context passed to the jobFunc once it completes.
+		ctx, cancel := context.WithCancel(ctx)
+		defer cancel()
+
+		return jobFunc(ctx, 0)
+	}
+	if concurrency <= 0 {
+		concurrency = jobs
+	}
 
 	// Initialise indexes with -1 so first Inc() returns index 0.
 	indexes := atomic.NewInt64(-1)
@@ -113,3 +127,35 @@ func ForEachJob(ctx context.Context, jobs int, concurrency int, jobFunc func(ctx
 	// Wait until done (or context has canceled).
 	return g.Wait()
 }
+
+// ForEachJobMergeResults is like ForEachJob but expects jobFunc to return a slice of results which are then
+// merged with results from all jobs. This function returns no results if an error occurred running any jobFunc.
+//
+// ForEachJobMergeResults cancels the context.Context passed to each invocation of jobFunc before ForEachJobMergeResults returns.
+func ForEachJobMergeResults[J any, R any](ctx context.Context, jobs []J, concurrency int, jobFunc func(ctx context.Context, job J) ([]R, error)) ([]R, error) {
+	var (
+		resultsMx sync.Mutex
+		results   = make([]R, 0, len(jobs)) // Assume at least 1 result per job.
+	)
+
+	err := ForEachJob(ctx, len(jobs), concurrency, func(ctx context.Context, idx int) error {
+		jobResult, jobErr := jobFunc(ctx, jobs[idx])
+		if jobErr != nil {
+			return jobErr
+		}
+
+		resultsMx.Lock()
+		results = append(results, jobResult...)
+		resultsMx.Unlock()
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Given no error occurred, it means that all job results have already been collected
+	// and so it's safe to access the results slice with no locking.
+	return results, nil
+}
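The new `ForEachJobMergeResults` helper is generic over job and result types. A minimal usage sketch (the job values and per-job function below are illustrative, not part of this change):

```go
package main

import (
	"context"
	"fmt"

	"github.com/grafana/dskit/concurrency"
)

func main() {
	jobs := []string{"tenant-a", "tenant-b", "tenant-c"}

	// Concurrency <= 0 now means "run all jobs in parallel" (see ForEachJob above).
	// Each jobFunc returns a slice; the helper merges all slices into a single result.
	results, err := concurrency.ForEachJobMergeResults(context.Background(), jobs, 0,
		func(_ context.Context, tenant string) ([]string, error) {
			return []string{tenant + "/series-1", tenant + "/series-2"}, nil
		})
	if err != nil {
		panic(err)
	}

	// Ordering across jobs is not guaranteed, only that all results are present.
	fmt.Println(len(results)) // 6
}
```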
diff --git a/vendor/github.com/grafana/dskit/grpcclient/backoff_retry.go b/vendor/github.com/grafana/dskit/grpcclient/backoff_retry.go
index 21abbb7865689..b0d7f9004f8d1 100644
--- a/vendor/github.com/grafana/dskit/grpcclient/backoff_retry.go
+++ b/vendor/github.com/grafana/dskit/grpcclient/backoff_retry.go
@@ -2,6 +2,7 @@ package grpcclient
 
 import (
 	"context"
+	"errors"
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -10,22 +11,27 @@ import (
 	"github.com/grafana/dskit/backoff"
 )
 
-// NewBackoffRetry gRPC middleware.
-func NewBackoffRetry(cfg backoff.Config) grpc.UnaryClientInterceptor {
+// NewRateLimitRetrier creates a UnaryClientInterceptor which retries with backoff
+// the calls from invoker when the executed RPC is rate limited.
+func NewRateLimitRetrier(cfg backoff.Config) grpc.UnaryClientInterceptor {
 	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
 		backoff := backoff.New(ctx, cfg)
+		var err error
 		for backoff.Ongoing() {
-			err := invoker(ctx, method, req, reply, cc, opts...)
+			err = invoker(ctx, method, req, reply, cc, opts...)
 			if err == nil {
 				return nil
 			}
 
+			// Only ResourceExhausted statuses are handled as signals of being rate limited,
+			// following the implementation of the package's RateLimiter interceptor.
+			// All other errors are propagated as-is upstream.
 			if status.Code(err) != codes.ResourceExhausted {
 				return err
 			}
 
 			backoff.Wait()
 		}
 
-		return backoff.Err()
+		return errors.Join(err, backoff.Err())
 	}
 }
diff --git a/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go b/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go
index b171889d0a048..7518990471549 100644
--- a/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go
+++ b/vendor/github.com/grafana/dskit/grpcclient/grpcclient.go
@@ -108,7 +108,7 @@ func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientIntercep
 	streamClientInterceptors = append(streamClientInterceptors, cfg.StreamMiddleware...)
 
 	if cfg.BackoffOnRatelimits {
-		unaryClientInterceptors = append([]grpc.UnaryClientInterceptor{NewBackoffRetry(cfg.BackoffConfig)}, unaryClientInterceptors...)
+		unaryClientInterceptors = append([]grpc.UnaryClientInterceptor{NewRateLimitRetrier(cfg.BackoffConfig)}, unaryClientInterceptors...)
 	}
 
 	if cfg.RateLimit > 0 {
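For context on how the renamed interceptor gets wired in: with `BackoffOnRatelimits` enabled, `DialOption` prepends `NewRateLimitRetrier`, so only `codes.ResourceExhausted` responses are retried while all other errors fail fast. A sketch using only the `Config` fields visible in this diff (the values are illustrative):

```go
package example

import (
	"time"

	"github.com/grafana/dskit/backoff"
	"github.com/grafana/dskit/grpcclient"
)

// clientConfig enables retry-with-backoff on rate-limited RPCs.
func clientConfig() grpcclient.Config {
	var cfg grpcclient.Config
	cfg.BackoffOnRatelimits = true
	cfg.BackoffConfig = backoff.Config{
		MinBackoff: 100 * time.Millisecond,
		MaxBackoff: 10 * time.Second,
		MaxRetries: 10,
	}
	return cfg
}
```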
diff --git a/vendor/github.com/grafana/dskit/instrument/instrument.go b/vendor/github.com/grafana/dskit/instrument/instrument.go
index 4ea480b29d60e..f54e49def3086 100644
--- a/vendor/github.com/grafana/dskit/instrument/instrument.go
+++ b/vendor/github.com/grafana/dskit/instrument/instrument.go
@@ -75,7 +75,7 @@ func ObserveWithExemplar(ctx context.Context, histogram prometheus.Observer, sec
 	if traceID, ok := tracing.ExtractSampledTraceID(ctx); ok {
 		histogram.(prometheus.ExemplarObserver).ObserveWithExemplar(
 			seconds,
-			prometheus.Labels{"traceID": traceID},
+			prometheus.Labels{"trace_id": traceID, "traceID": traceID},
 		)
 		return
 	}
diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go
index 693964b5ad067..e8a94debe181c 100644
--- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go
+++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go
@@ -177,7 +177,7 @@ func (cfg *KVConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
 	// "Defaults to hostname" -- memberlist sets it to hostname by default.
 	f.StringVar(&cfg.NodeName, prefix+"memberlist.nodename", "", "Name of the node in memberlist cluster. Defaults to hostname.") // memberlist.DefaultLANConfig will put hostname here.
 	f.BoolVar(&cfg.RandomizeNodeName, prefix+"memberlist.randomize-node-name", true, "Add random suffix to the node name.")
-	f.DurationVar(&cfg.StreamTimeout, prefix+"memberlist.stream-timeout", mlDefaults.TCPTimeout, "The timeout for establishing a connection with a remote node, and for read/write operations.")
+	f.DurationVar(&cfg.StreamTimeout, prefix+"memberlist.stream-timeout", 2*time.Second, "The timeout for establishing a connection with a remote node, and for read/write operations.")
 	f.IntVar(&cfg.RetransmitMult, prefix+"memberlist.retransmit-factor", mlDefaults.RetransmitMult, "Multiplication factor used when sending out messages (factor * log(N+1)).")
 	f.Var(&cfg.JoinMembers, prefix+"memberlist.join", "Other cluster members to join. Can be specified multiple times. It can be an IP, hostname or an entry specified in the DNS Service Discovery format.")
 	f.DurationVar(&cfg.MinJoinBackoff, prefix+"memberlist.min-join-backoff", 1*time.Second, "Min backoff duration to join other cluster members.")
diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/mergeable.go b/vendor/github.com/grafana/dskit/kv/memberlist/mergeable.go
index 2c02acfa468e4..9833a858b4761 100644
--- a/vendor/github.com/grafana/dskit/kv/memberlist/mergeable.go
+++ b/vendor/github.com/grafana/dskit/kv/memberlist/mergeable.go
@@ -35,17 +35,17 @@ type Mergeable interface {
 	// used when doing CAS operation)
 	Merge(other Mergeable, localCAS bool) (change Mergeable, error error)
 
-	// Describes the content of this mergeable value. Used by memberlist client to decide if
+	// MergeContent describes the content of this mergeable value. Used by memberlist client to decide if
 	// one change-value can invalidate some other value, that was received previously.
 	// Invalidation can happen only if output of MergeContent is a superset of some other MergeContent.
 	MergeContent() []string
 
-	// Remove tombstones older than given limit from this mergeable.
+	// RemoveTombstones removes tombstones older than the given limit from this mergeable.
 	// If limit is zero time, remove all tombstones. Memberlist client calls this method with zero limit each
 	// time when client is accessing value from the store. It can be used to hide tombstones from the clients.
 	// Returns the total number of tombstones present and the number of removed tombstones by this invocation.
 	RemoveTombstones(limit time.Time) (total, removed int)
 
-	// Clone should return a deep copy of the state.
+	// Clone returns a deep copy of the state.
 	Clone() Mergeable
 }
diff --git a/vendor/github.com/grafana/dskit/middleware/logging.go b/vendor/github.com/grafana/dskit/middleware/logging.go
index aeb15cc6b63a1..fe00d3a82846c 100644
--- a/vendor/github.com/grafana/dskit/middleware/logging.go
+++ b/vendor/github.com/grafana/dskit/middleware/logging.go
@@ -58,7 +58,7 @@ func (l Log) logWithRequest(r *http.Request) log.Logger {
 	localLog := l.Log
 	traceID, ok := tracing.ExtractTraceID(r.Context())
 	if ok {
-		localLog = log.With(localLog, "traceID", traceID)
+		localLog = log.With(localLog, "trace_id", traceID)
 	}
 
 	if l.SourceIPs != nil {
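The instrument.go change above means exemplars on sampled traces carry both the new `trace_id` label and the legacy `traceID` label during the migration. A sketch of the call side (the histogram itself is assumed, not part of this change):

```go
package example

import (
	"context"
	"time"

	"github.com/grafana/dskit/instrument"
	"github.com/prometheus/client_golang/prometheus"
)

// observeDuration records a request duration; if the context carries a sampled
// trace, the observation gets an exemplar with both label spellings.
func observeDuration(ctx context.Context, h prometheus.Histogram, start time.Time) {
	instrument.ObserveWithExemplar(ctx, h, time.Since(start).Seconds())
}
```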
diff --git a/vendor/github.com/grafana/dskit/middleware/source_ips.go b/vendor/github.com/grafana/dskit/middleware/source_ips.go
index 7c035ddbf47e6..d08797abb09b5 100644
--- a/vendor/github.com/grafana/dskit/middleware/source_ips.go
+++ b/vendor/github.com/grafana/dskit/middleware/source_ips.go
@@ -18,6 +18,9 @@ var (
 	// De-facto standard header keys.
 	xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
 	xRealIP       = http.CanonicalHeaderKey("X-Real-IP")
+	// Allows extracting the host from the X-Forwarded-For header.
+	// Will strip out any spaces or double quotes surrounding the host.
+	xForwardedForRegex = regexp.MustCompile(`(?: *"?([^,]+)"? *)`)
 )
 
 var (
@@ -25,9 +28,9 @@ var (
 	// existing use of X-Forwarded-* headers.
 	// e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43
 	forwarded = http.CanonicalHeaderKey("Forwarded")
-	// Allows for a sub-match of the first value after 'for=' to the next
-	// comma, semi-colon or space. The match is case-insensitive.
-	forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`)
+	// Allows extracting the host from the for clause of the Forwarded header.
+	// Will strip out any spaces or double quotes surrounding the host.
+	forwardedRegex = regexp.MustCompile(`(?i)(?:for=)(?: *"?([^;,]+)"? *)`)
 )
 
 // SourceIPExtractor extracts the source IPs from a HTTP request
@@ -37,10 +40,12 @@ type SourceIPExtractor struct {
 	// A regex that extracts the IP address from the header.
 	// It should contain at least one capturing group the first of which will be returned.
 	regex *regexp.Regexp
+	// A boolean to choose whether to return all found IPs or just the first match
+	extractAllHosts bool
 }
 
 // NewSourceIPs creates a new SourceIPs
-func NewSourceIPs(header, regex string) (*SourceIPExtractor, error) {
+func NewSourceIPs(header, regex string, extractAllHosts bool) (*SourceIPExtractor, error) {
 	if (header == "" && regex != "") || (header != "" && regex == "") {
 		return nil, fmt.Errorf("either both a header field and a regex have to be given or neither")
 	}
@@ -50,8 +55,9 @@ func NewSourceIPs(header, regex string) (*SourceIPExtractor, error) {
 	}
 
 	return &SourceIPExtractor{
-		header: header,
-		regex:  re,
+		header:          header,
+		regex:           re,
+		extractAllHosts: extractAllHosts,
 	}, nil
 }
 
@@ -72,7 +78,15 @@ func extractHost(address string) string {
 // Get returns any source addresses we can find in the request, comma-separated
 func (sips SourceIPExtractor) Get(req *http.Request) string {
-	fwd := extractHost(sips.getIP(req))
+	hosts := []string{}
+
+	// Remove port information from each extracted address
+	for _, addr := range sips.getIP(req) {
+		hosts = append(hosts, extractHost(addr))
+	}
+
+	fwd := strings.Join(hosts, ", ")
+
 	if fwd == "" {
 		if req.RemoteAddr == "" {
 			return ""
@@ -94,52 +108,45 @@ func (sips SourceIPExtractor) Get(req *http.Request) string {
 // getIP retrieves the IP from the RFC7239 Forwarded headers,
 // X-Real-IP and X-Forwarded-For (in that order) or from the
 // custom regex.
-func (sips SourceIPExtractor) getIP(r *http.Request) string {
-	var addr string
+func (sips SourceIPExtractor) getIP(r *http.Request) []string {
+	var addrs = []string{}
 
 	// Use the custom regex only if it was setup
 	if sips.header != "" {
 		hdr := r.Header.Get(sips.header)
 		if hdr == "" {
-			return ""
-		}
-		allMatches := sips.regex.FindAllStringSubmatch(hdr, 1)
-		if len(allMatches) == 0 {
-			return ""
-		}
-		firstMatch := allMatches[0]
-		// Check there is at least 1 submatch
-		if len(firstMatch) < 2 {
-			return ""
+			return addrs
 		}
-		return firstMatch[1]
-	}
-
-	if fwd := r.Header.Get(forwarded); fwd != "" {
-		// match should contain at least two elements if the protocol was
-		// specified in the Forwarded header. The first element will always be
-		// the 'for=' capture, which we ignore. In the case of multiple IP
-		// addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only
-		// extract the first, which should be the client IP.
-		if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
-			// IPv6 addresses in Forwarded headers are quoted-strings. We strip
-			// these quotes.
-			addr = strings.Trim(match[1], `"`)
-		}
+		addrs = sips.extractHeader(hdr, sips.regex)
+	} else if fwd := r.Header.Get(forwarded); fwd != "" {
+		addrs = sips.extractHeader(fwd, forwardedRegex)
 	} else if fwd := r.Header.Get(xRealIP); fwd != "" {
 		// X-Real-IP should only contain one IP address (the client making the
 		// request).
-		addr = fwd
+		addrs = append([]string{}, fwd)
 	} else if fwd := strings.ReplaceAll(r.Header.Get(xForwardedFor), " ", ""); fwd != "" {
-		// Only grab the first (client) address. Note that '192.168.0.1,
-		// 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
-		// the first may represent forwarding proxies earlier in the chain.
-		s := strings.Index(fwd, ",")
-		if s == -1 {
-			s = len(fwd)
+		addrs = sips.extractHeader(fwd, xForwardedForRegex)
+	}
+
+	return addrs
+}
+
+// extractHeader is a helper function that parses header content with a regex and returns the list
+// of all matching groups as strings.
+func (sips SourceIPExtractor) extractHeader(header string, regex *regexp.Regexp) []string {
+	var addrs = []string{}
+
+	if allMatches := regex.FindAllStringSubmatch(header, -1); len(allMatches) > 0 {
+		for _, match := range allMatches {
+			if len(match) > 1 {
+				addrs = append(addrs, match[1])
+			}
+			if !sips.extractAllHosts {
+				break
+			}
 		}
-		addr = fwd[:s]
 	}
 
-	return addr
+	return addrs
 }
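`NewSourceIPs` gains a third parameter controlling whether all hosts are extracted. A sketch of the new multi-host behaviour (the header values are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/grafana/dskit/middleware"
)

func main() {
	// extractAllHosts=true returns every address found in the header,
	// not just the first match as before.
	sips, err := middleware.NewSourceIPs("", "", true)
	if err != nil {
		panic(err)
	}

	req, _ := http.NewRequest(http.MethodGet, "http://example.local", nil)
	req.Header.Set("X-Forwarded-For", "192.0.2.60, 10.1.1.1")

	fmt.Println(sips.Get(req)) // "192.0.2.60, 10.1.1.1"
}
```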
diff --git a/vendor/github.com/grafana/dskit/ring/batch.go b/vendor/github.com/grafana/dskit/ring/batch.go
index 7781fe67a5ae0..f982bd6c68c3e 100644
--- a/vendor/github.com/grafana/dskit/ring/batch.go
+++ b/vendor/github.com/grafana/dskit/ring/batch.go
@@ -49,9 +49,26 @@ func isHTTPStatus4xx(err error) bool {
 	return code/100 == 4
 }
 
+// DoBatchRing defines the interface required by a ring implementation to use DoBatch() and DoBatchWithOptions().
+type DoBatchRing interface {
+	// Get returns a ReplicationSet containing the instances to which the input key should be sharded
+	// for the input Operation.
+	//
+	// The input buffers may be referenced in the returned ReplicationSet. This means that it's unsafe to call
+	// Get() multiple times passing the same buffers if ReplicationSet is retained between two different Get()
+	// calls. In this case, you can pass nil buffers.
+	Get(key uint32, op Operation, bufInstances []InstanceDesc, bufStrings1, bufStrings2 []string) (ReplicationSet, error)
+
+	// ReplicationFactor returns the number of instances each key is expected to be sharded to.
+	ReplicationFactor() int
+
+	// InstancesCount returns the number of instances in the ring eligible to have keys sharded to them.
+	InstancesCount() int
+}
+
 // DoBatch is a deprecated version of DoBatchWithOptions where grpc errors containing status codes 4xx are treated as client errors.
 // Deprecated. Use DoBatchWithOptions instead.
-func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
+func DoBatch(ctx context.Context, op Operation, r DoBatchRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
 	return DoBatchWithOptions(ctx, op, r, keys, callback, DoBatchOptions{
 		Cleanup:       cleanup,
 		IsClientError: isHTTPStatus4xx,
@@ -94,14 +111,14 @@ func (o *DoBatchOptions) replaceZeroValuesWithDefaults() {
 // See comments on DoBatchOptions for available options for this call.
 //
 // Not implemented as a method on Ring, so we can test separately.
-func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, o DoBatchOptions) error {
+func DoBatchWithOptions(ctx context.Context, op Operation, r DoBatchRing, keys []uint32, callback func(InstanceDesc, []int) error, o DoBatchOptions) error {
 	o.replaceZeroValuesWithDefaults()
 
 	if r.InstancesCount() <= 0 {
 		o.Cleanup()
 		return fmt.Errorf("DoBatch: InstancesCount <= 0")
 	}
-	expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount()
+	expectedTrackersPerInstance := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount()
 	itemTrackers := make([]itemTracker, len(keys))
 	instances := make(map[string]instance, r.InstancesCount())
 
@@ -132,8 +149,8 @@ func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []ui
 	for _, desc := range replicationSet.Instances {
 		curr, found := instances[desc.Addr]
 		if !found {
-			curr.itemTrackers = make([]*itemTracker, 0, expectedTrackersPerInstance)
-			curr.indexes = make([]int, 0, expectedTrackersPerInstance)
+			curr.itemTrackers = make([]*itemTracker, 0, expectedTrackersPerInstance)
+			curr.indexes = make([]int, 0, expectedTrackersPerInstance)
 		}
 		instances[desc.Addr] = instance{
 			desc: desc,
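A sketch of a write path going through the new `DoBatchRing` interface (the per-instance callback is hypothetical):

```go
package example

import (
	"context"

	"github.com/grafana/dskit/ring"
)

// batchPush shards keys across the ring and invokes pushToInstance once per
// instance with the indexes of the keys routed to it.
func batchPush(ctx context.Context, r ring.DoBatchRing, keys []uint32,
	pushToInstance func(ring.InstanceDesc, []int) error) error {
	return ring.DoBatchWithOptions(ctx, ring.Write, r, keys, pushToInstance, ring.DoBatchOptions{
		Cleanup: func() { /* release any pooled request buffers here */ },
	})
}
```

Since `*ring.Ring` already provides `Get`, `ReplicationFactor` and `InstancesCount`, it satisfies `DoBatchRing` unchanged; the narrower interface mainly lets tests and alternative ring implementations plug in.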
diff --git a/vendor/github.com/grafana/dskit/ring/model.go b/vendor/github.com/grafana/dskit/ring/model.go
index 956dbe0cf4224..5b4f1bc5dc566 100644
--- a/vendor/github.com/grafana/dskit/ring/model.go
+++ b/vendor/github.com/grafana/dskit/ring/model.go
@@ -21,6 +21,13 @@ func (ts ByAddr) Len() int { return len(ts) }
 func (ts ByAddr) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] }
 func (ts ByAddr) Less(i, j int) bool { return ts[i].Addr < ts[j].Addr }
 
+// ByID is a sortable list of InstanceDesc.
+type ByID []InstanceDesc
+
+func (ts ByID) Len() int { return len(ts) }
+func (ts ByID) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] }
+func (ts ByID) Less(i, j int) bool { return ts[i].Id < ts[j].Id }
+
 // ProtoDescFactory makes new Descs
 func ProtoDescFactory() proto.Message {
 	return NewDesc()
@@ -195,7 +202,6 @@ func (d *Desc) mergeWithTime(mergeable memberlist.Mergeable, localCAS bool, now
 	other, ok := mergeable.(*Desc)
 	if !ok {
-		// This method only deals with non-nil rings.
 		return nil, fmt.Errorf("expected *ring.Desc, got %T", mergeable)
 	}
 
@@ -512,6 +518,16 @@ func (d *Desc) getOldestRegisteredTimestamp() int64 {
 	return result
 }
 
+func (d *Desc) instancesCountPerZone() map[string]int {
+	instancesCountPerZone := map[string]int{}
+	if d != nil {
+		for _, ingester := range d.Ingesters {
+			instancesCountPerZone[ingester.Zone]++
+		}
+	}
+	return instancesCountPerZone
+}
+
 type CompareResult int
 
 // CompareResult responses
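The new `ByID` ordering can be used anywhere `ByAddr` was, e.g. to get deterministic output when listing instances (the instance values below are illustrative):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/grafana/dskit/ring"
)

func main() {
	instances := []ring.InstanceDesc{
		{Id: "ingester-2", Addr: "10.0.0.2"},
		{Id: "ingester-0", Addr: "10.0.0.3"},
		{Id: "ingester-1", Addr: "10.0.0.1"},
	}

	// Sort by instance ID instead of address.
	sort.Sort(ring.ByID(instances))
	fmt.Println(instances[0].Id) // ingester-0
}
```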
return nil, fmt.Errorf("expected *ring.Desc, got %T", mergeable) } @@ -512,6 +518,16 @@ func (d *Desc) getOldestRegisteredTimestamp() int64 { return result } +func (d *Desc) instancesCountPerZone() map[string]int { + instancesCountPerZone := map[string]int{} + if d != nil { + for _, ingester := range d.Ingesters { + instancesCountPerZone[ingester.Zone]++ + } + } + return instancesCountPerZone +} + type CompareResult int // CompareResult responses diff --git a/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go b/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go new file mode 100644 index 0000000000000..9ad31a54f26d3 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_instance_lifecycler.go @@ -0,0 +1,412 @@ +package ring + +import ( + "context" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/atomic" + + "github.com/grafana/dskit/kv" + "github.com/grafana/dskit/services" +) + +var ( + ErrPartitionDoesNotExist = errors.New("the partition does not exist") + ErrPartitionStateMismatch = errors.New("the partition state does not match the expected one") + ErrPartitionStateChangeNotAllowed = errors.New("partition state change not allowed") + + allowedPartitionStateChanges = map[PartitionState][]PartitionState{ + PartitionPending: {PartitionActive, PartitionInactive}, + PartitionActive: {PartitionInactive}, + PartitionInactive: {PartitionPending, PartitionActive}, + } +) + +type PartitionInstanceLifecyclerConfig struct { + // PartitionID is the ID of the partition managed by the lifecycler. + PartitionID int32 + + // InstanceID is the ID of the instance managed by the lifecycler. + InstanceID string + + // WaitOwnersCountOnPending is the minimum number of owners to wait before switching a + // PENDING partition to ACTIVE. + WaitOwnersCountOnPending int + + // WaitOwnersDurationOnPending is how long each owner should have been added to the + // partition before it's considered eligible for the WaitOwnersCountOnPending count. + WaitOwnersDurationOnPending time.Duration + + // DeleteInactivePartitionAfterDuration is how long the lifecycler should wait before + // deleting inactive partitions with no owners. Inactive partitions are never removed + // if this value is 0. + DeleteInactivePartitionAfterDuration time.Duration + + // PollingInterval is the internal polling interval. This setting is useful to let + // upstream projects to lower it in unit tests. + PollingInterval time.Duration +} + +// PartitionInstanceLifecycler is responsible to manage the lifecycle of a single +// partition and partition owner in the ring. +type PartitionInstanceLifecycler struct { + *services.BasicService + + // These values are initialised at startup, and never change. + cfg PartitionInstanceLifecyclerConfig + ringName string + ringKey string + store kv.Client + logger log.Logger + + // Channel used to execute logic within the lifecycler loop. + actorChan chan func() + + // Whether the partitions should be created on startup if it doesn't exist yet. + createPartitionOnStartup *atomic.Bool + + // Whether the lifecycler should remove the partition owner (identified by instance ID) on shutdown. + removeOwnerOnShutdown *atomic.Bool + + // Metrics. 
+
+func NewPartitionInstanceLifecycler(cfg PartitionInstanceLifecyclerConfig, ringName, ringKey string, store kv.Client, logger log.Logger, reg prometheus.Registerer) *PartitionInstanceLifecycler {
+	if cfg.PollingInterval == 0 {
+		cfg.PollingInterval = 5 * time.Second
+	}
+
+	l := &PartitionInstanceLifecycler{
+		cfg:                      cfg,
+		ringName:                 ringName,
+		ringKey:                  ringKey,
+		store:                    store,
+		logger:                   log.With(logger, "ring", ringName),
+		actorChan:                make(chan func()),
+		createPartitionOnStartup: atomic.NewBool(true),
+		removeOwnerOnShutdown:    atomic.NewBool(false),
+		reconcilesTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name:        "partition_ring_lifecycler_reconciles_total",
+			Help:        "Total number of reconciliations started.",
+			ConstLabels: map[string]string{"name": ringName},
+		}, []string{"type"}),
+		reconcilesFailedTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name:        "partition_ring_lifecycler_reconciles_failed_total",
+			Help:        "Total number of reconciliations failed.",
+			ConstLabels: map[string]string{"name": ringName},
+		}, []string{"type"}),
+	}
+
+	l.BasicService = services.NewBasicService(l.starting, l.running, l.stopping)
+
+	return l
+}
+
+// CreatePartitionOnStartup returns whether the lifecycler creates the partition on startup
+// if it doesn't exist.
+func (l *PartitionInstanceLifecycler) CreatePartitionOnStartup() bool {
+	return l.createPartitionOnStartup.Load()
+}
+
+// SetCreatePartitionOnStartup sets whether the lifecycler should create the partition on
+// startup if it doesn't exist.
+func (l *PartitionInstanceLifecycler) SetCreatePartitionOnStartup(create bool) {
+	l.createPartitionOnStartup.Store(create)
+}
+
+// RemoveOwnerOnShutdown returns whether the lifecycler has been configured to remove the partition
+// owner on shutdown.
+func (l *PartitionInstanceLifecycler) RemoveOwnerOnShutdown() bool {
+	return l.removeOwnerOnShutdown.Load()
+}
+
+// SetRemoveOwnerOnShutdown sets whether the lifecycler should remove the partition owner on shutdown.
+func (l *PartitionInstanceLifecycler) SetRemoveOwnerOnShutdown(remove bool) {
+	l.removeOwnerOnShutdown.Store(remove)
+}
+
+// GetPartitionState returns the current state of the partition, and the timestamp of the last
+// state change.
+func (l *PartitionInstanceLifecycler) GetPartitionState(ctx context.Context) (PartitionState, time.Time, error) {
+	ring, err := l.getRing(ctx)
+	if err != nil {
+		return PartitionUnknown, time.Time{}, err
+	}
+
+	partition, exists := ring.Partitions[l.cfg.PartitionID]
+	if !exists {
+		return PartitionUnknown, time.Time{}, ErrPartitionDoesNotExist
+	}
+
+	return partition.GetState(), partition.GetStateTime(), nil
+}
+
+// ChangePartitionState changes the partition state to toState.
+// This function returns ErrPartitionDoesNotExist if the partition doesn't exist,
+// and ErrPartitionStateChangeNotAllowed if the state change is not allowed.
+func (l *PartitionInstanceLifecycler) ChangePartitionState(ctx context.Context, toState PartitionState) error {
+	return l.runOnLifecyclerLoop(func() error {
+		err := l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) {
+			return changePartitionState(ring, l.cfg.PartitionID, toState)
+		})
+
+		if err != nil {
+			level.Warn(l.logger).Log("msg", "failed to change partition state", "partition", l.cfg.PartitionID, "to_state", toState, "err", err)
+		}
+
+		return err
+	})
+}
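A sketch of driving the partition state machine from operator code; transitions not listed in `allowedPartitionStateChanges` above fail with `ErrPartitionStateChangeNotAllowed` (the helper itself is hypothetical):

```go
package example

import (
	"context"

	"github.com/grafana/dskit/ring"
)

// markPartitionInactive is a hypothetical scale-down hook: ACTIVE -> INACTIVE
// is an allowed transition, and an INACTIVE partition can later be switched
// back to PENDING or ACTIVE.
func markPartitionInactive(ctx context.Context, lc *ring.PartitionInstanceLifecycler) error {
	return lc.ChangePartitionState(ctx, ring.PartitionInactive)
}
```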
+
+func (l *PartitionInstanceLifecycler) starting(ctx context.Context) error {
+	if l.CreatePartitionOnStartup() {
+		return errors.Wrap(l.createPartitionAndRegisterOwner(ctx), "create partition and register owner")
+	}
+
+	return errors.Wrap(l.waitPartitionAndRegisterOwner(ctx), "wait partition and register owner")
+}
+
+func (l *PartitionInstanceLifecycler) running(ctx context.Context) error {
+	reconcile := func() {
+		l.reconcileOwnedPartition(ctx, time.Now())
+		l.reconcileOtherPartitions(ctx, time.Now())
+	}
+
+	// Run a reconciliation as soon as the lifecycler starts, in order to avoid waiting for the 1st timer tick.
+	reconcile()
+
+	reconcileTicker := time.NewTicker(l.cfg.PollingInterval)
+	defer reconcileTicker.Stop()
+
+	for {
+		select {
+		case <-reconcileTicker.C:
+			reconcile()
+
+		case f := <-l.actorChan:
+			f()
+
+		case <-ctx.Done():
+			return nil
+		}
+	}
+}
+
+func (l *PartitionInstanceLifecycler) stopping(_ error) error {
+	level.Info(l.logger).Log("msg", "partition ring lifecycler is shutting down", "ring", l.ringName)
+
+	// Remove the instance from partition owners, if configured to do so.
+	if l.RemoveOwnerOnShutdown() {
+		err := l.updateRing(context.Background(), func(ring *PartitionRingDesc) (bool, error) {
+			return ring.RemoveOwner(l.cfg.InstanceID), nil
+		})
+
+		if err != nil {
+			level.Error(l.logger).Log("msg", "failed to remove instance from partition owners on shutdown", "instance", l.cfg.InstanceID, "partition", l.cfg.PartitionID, "err", err)
+		} else {
+			level.Info(l.logger).Log("msg", "instance removed from partition owners", "instance", l.cfg.InstanceID, "partition", l.cfg.PartitionID)
+		}
+	}
+
+	return nil
+}
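Since the lifecycler is a regular dskit service built on `services.NewBasicService`, it's started and stopped like any other service. A sketch, assuming an existing `kv.Client`; the ring/key names and config values are illustrative:

```go
package example

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/grafana/dskit/kv"
	"github.com/grafana/dskit/ring"
	"github.com/grafana/dskit/services"
)

func runLifecycler(ctx context.Context, store kv.Client) error {
	cfg := ring.PartitionInstanceLifecyclerConfig{
		PartitionID:                          0,
		InstanceID:                           "ingester-zone-a-0",
		WaitOwnersCountOnPending:             1,
		WaitOwnersDurationOnPending:          10 * time.Second,
		DeleteInactivePartitionAfterDuration: 13 * time.Hour, // 0 disables deletion.
	}

	lc := ring.NewPartitionInstanceLifecycler(cfg, "ingester-partitions", "ingester-partitions-key",
		store, log.NewNopLogger(), prometheus.NewRegistry())

	// starting() creates (or waits for) the partition and registers the owner;
	// running() then reconciles on every polling interval.
	return services.StartAndAwaitRunning(ctx, lc)
}
```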
+
+// runOnLifecyclerLoop runs fn within the lifecycler loop.
+func (l *PartitionInstanceLifecycler) runOnLifecyclerLoop(fn func() error) error {
+	sc := l.ServiceContext()
+	if sc == nil {
+		return errors.New("lifecycler not running")
+	}
+
+	errCh := make(chan error)
+	wrappedFn := func() {
+		errCh <- fn()
+	}
+
+	select {
+	case <-sc.Done():
+		return errors.New("lifecycler not running")
+	case l.actorChan <- wrappedFn:
+		return <-errCh
+	}
+}
+
+func (l *PartitionInstanceLifecycler) getRing(ctx context.Context) (*PartitionRingDesc, error) {
+	in, err := l.store.Get(ctx, l.ringKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return GetOrCreatePartitionRingDesc(in), nil
+}
+
+func (l *PartitionInstanceLifecycler) updateRing(ctx context.Context, update func(ring *PartitionRingDesc) (bool, error)) error {
+	return l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+		ringDesc := GetOrCreatePartitionRingDesc(in)
+
+		if changed, err := update(ringDesc); err != nil {
+			return nil, false, err
+		} else if !changed {
+			return nil, false, nil
+		}
+
+		return ringDesc, true, nil
+	})
+}
+
+func (l *PartitionInstanceLifecycler) createPartitionAndRegisterOwner(ctx context.Context) error {
+	return l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) {
+		now := time.Now()
+		changed := false
+
+		partitionDesc, exists := ring.Partitions[l.cfg.PartitionID]
+		if exists {
+			level.Info(l.logger).Log("msg", "partition found in the ring", "partition", l.cfg.PartitionID, "state", partitionDesc.GetState(), "state_timestamp", partitionDesc.GetStateTime().String(), "tokens", len(partitionDesc.GetTokens()))
+		} else {
+			level.Info(l.logger).Log("msg", "partition not found in the ring", "partition", l.cfg.PartitionID)
+		}
+
+		if !exists {
+			// The partition doesn't exist, so we create a new one. A new partition should always be created
+			// in PENDING state.
+			ring.AddPartition(l.cfg.PartitionID, PartitionPending, now)
+			changed = true
+		}
+
+		// Ensure the instance is added as partition owner.
+		if ring.AddOrUpdateOwner(l.cfg.InstanceID, OwnerActive, l.cfg.PartitionID, now) {
+			changed = true
+		}
+
+		return changed, nil
+	})
+}
+
+func (l *PartitionInstanceLifecycler) waitPartitionAndRegisterOwner(ctx context.Context) error {
+	pollTicker := time.NewTicker(l.cfg.PollingInterval)
+	defer pollTicker.Stop()
+
+	// Wait until the partition exists.
+	checkPartitionExist := func() (bool, error) {
+		level.Info(l.logger).Log("msg", "checking if the partition exists in the ring", "partition", l.cfg.PartitionID)
+
+		ring, err := l.getRing(ctx)
+		if err != nil {
+			return false, errors.Wrap(err, "read partition ring")
+		}
+
+		if ring.HasPartition(l.cfg.PartitionID) {
+			level.Info(l.logger).Log("msg", "partition found in the ring", "partition", l.cfg.PartitionID)
+			return true, nil
+		}
+
+		level.Info(l.logger).Log("msg", "partition not found in the ring", "partition", l.cfg.PartitionID)
+		return false, nil
+	}
+
+	for {
+		if exists, err := checkPartitionExist(); err != nil {
+			return err
+		} else if exists {
+			break
+		}
+
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+
+		case <-pollTicker.C:
+			// Throttle.
+		}
+	}
+
+	// Ensure the instance is added as partition owner.
+	return l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) {
+		return ring.AddOrUpdateOwner(l.cfg.InstanceID, OwnerActive, l.cfg.PartitionID, time.Now()), nil
+	})
+}
+
+// reconcileOwnedPartition reconciles the owned partition.
+// This function should be called periodically.
+func (l *PartitionInstanceLifecycler) reconcileOwnedPartition(ctx context.Context, now time.Time) {
+	const reconcileType = "owned-partition"
+	l.reconcilesTotal.WithLabelValues(reconcileType).Inc()
+
+	err := l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) {
+		partitionID := l.cfg.PartitionID
+
+		partition, exists := ring.Partitions[partitionID]
+		if !exists {
+			return false, ErrPartitionDoesNotExist
+		}
+
+		// A pending partition should be switched to active if enough owners were
+		// added to it longer ago than the waiting period.
+		if partition.IsPending() && ring.PartitionOwnersCountUpdatedBefore(partitionID, now.Add(-l.cfg.WaitOwnersDurationOnPending)) >= l.cfg.WaitOwnersCountOnPending {
+			level.Info(l.logger).Log("msg", "switching partition state because enough owners have been registered and minimum waiting time has elapsed", "partition", l.cfg.PartitionID, "from_state", PartitionPending, "to_state", PartitionActive)
+			return ring.UpdatePartitionState(partitionID, PartitionActive, now), nil
+		}
+
+		return false, nil
+	})
+
+	if err != nil {
+		l.reconcilesFailedTotal.WithLabelValues(reconcileType).Inc()
+		level.Warn(l.logger).Log("msg", "failed to reconcile owned partition", "partition", l.cfg.PartitionID, "err", err)
+	}
+}
+
+// reconcileOtherPartitions reconciles other partitions.
+// This function should be called periodically.
+func (l *PartitionInstanceLifecycler) reconcileOtherPartitions(ctx context.Context, now time.Time) {
+	const reconcileType = "other-partitions"
+	l.reconcilesTotal.WithLabelValues(reconcileType).Inc()
+
+	err := l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) {
+		changed := false
+
+		if l.cfg.DeleteInactivePartitionAfterDuration > 0 {
+			deleteBefore := now.Add(-l.cfg.DeleteInactivePartitionAfterDuration)
+
+			for partitionID, partition := range ring.Partitions {
+				// Never delete the partition owned by this lifecycler, since it's expected to have at least
+				// this instance as owner.
+				if partitionID == l.cfg.PartitionID {
+					continue
+				}
+
+				// A partition is safe to remove only if it has been inactive for longer than the wait period
+				// and it has no owners registered.
+				if partition.IsInactiveSince(deleteBefore) && ring.PartitionOwnersCount(partitionID) == 0 {
+					level.Info(l.logger).Log("msg", "removing inactive partition with no owners from ring", "partition", partitionID, "state", partition.State.CleanName(), "state_timestamp", partition.GetStateTime().String())
+					ring.RemovePartition(partitionID)
+					changed = true
+				}
+			}
+		}
+
+		return changed, nil
+	})
+
+	if err != nil {
+		l.reconcilesFailedTotal.WithLabelValues(reconcileType).Inc()
+		level.Warn(l.logger).Log("msg", "failed to reconcile other partitions", "err", err)
+	}
+}
+
+func isPartitionStateChangeAllowed(from, to PartitionState) bool {
+	for _, allowed := range allowedPartitionStateChanges[from] {
+		if to == allowed {
+			return true
+		}
+	}
+
+	return false
+}
+	PartitionRing() *PartitionRing
+}
+
+// PartitionInstanceRing holds a partitions ring and an instances ring, and provides functions
+// to look up the intersection of the two (e.g. healthy instances by partition).
+type PartitionInstanceRing struct {
+	partitionsRingReader PartitionRingReader
+	instancesRing        *Ring
+	heartbeatTimeout     time.Duration
+}
+
+func NewPartitionInstanceRing(partitionsRingWatcher PartitionRingReader, instancesRing *Ring, heartbeatTimeout time.Duration) *PartitionInstanceRing {
+	return &PartitionInstanceRing{
+		partitionsRingReader: partitionsRingWatcher,
+		instancesRing:        instancesRing,
+		heartbeatTimeout:     heartbeatTimeout,
+	}
+}
+
+func (r *PartitionInstanceRing) PartitionRing() *PartitionRing {
+	return r.partitionsRingReader.PartitionRing()
+}
+
+func (r *PartitionInstanceRing) InstanceRing() *Ring {
+	return r.instancesRing
+}
+
+// GetReplicationSetsForOperation returns one ReplicationSet for each partition in the ring.
+// If there are no healthy owners for a partition, an error is returned.
+func (r *PartitionInstanceRing) GetReplicationSetsForOperation(op Operation) ([]ReplicationSet, error) {
+	partitionsRing := r.PartitionRing()
+	partitionsRingDesc := partitionsRing.desc
+
+	if len(partitionsRingDesc.Partitions) == 0 {
+		return nil, ErrEmptyRing
+	}
+
+	now := time.Now()
+	result := make([]ReplicationSet, 0, len(partitionsRingDesc.Partitions))
+	zonesBuffer := make([]string, 0, 3) // Pre-allocate buffer assuming 3 zones.
+
+	for partitionID := range partitionsRingDesc.Partitions {
+		ownerIDs := partitionsRing.PartitionOwnerIDs(partitionID)
+		instances := make([]InstanceDesc, 0, len(ownerIDs))
+
+		for _, instanceID := range ownerIDs {
+			instance, err := r.instancesRing.GetInstance(instanceID)
+			if err != nil {
+				// If an instance doesn't exist in the instances ring we don't return an error
+				// but look up the other instances of the partition.
+				continue
+			}
+
+			if !instance.IsHealthy(op, r.heartbeatTimeout, now) {
+				continue
+			}
+
+			instances = append(instances, instance)
+		}
+
+		if len(instances) == 0 {
+			return nil, fmt.Errorf("partition %d: %w", partitionID, ErrTooManyUnhealthyInstances)
+		}
+
+		// Count the number of unique zones among instances.
+		zonesBuffer = uniqueZonesFromInstances(instances, zonesBuffer[:0])
+		uniqueZones := len(zonesBuffer)
+
+		result = append(result, ReplicationSet{
+			Instances: instances,
+
+			// Partitions have no concept of zone, but we enable zone awareness in order to
+			// support the ring's requests minimization feature.
+			ZoneAwarenessEnabled: true,
+
+			// We need a response from at least 1 owner. The assumption is that we have 1 owner per zone,
+			// but it's not guaranteed (it depends on how the application was deployed). The safest thing
+			// we can do here is to just require a successful response from at least 1 zone.
+			MaxUnavailableZones: uniqueZones - 1,
+		})
+	}
+	return result, nil
+}
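To make the MaxUnavailableZones choice above concrete: if a partition's healthy owners span 3 zones, its ReplicationSet gets MaxUnavailableZones = 2, so a response from any single zone completes that partition's request. A short sketch of consuming the result from outside the package; queryPartitionOwners is a hypothetical helper, and ring.Read is assumed to be dskit's standard read operation:

// queryAllPartitions fans a request out to every partition, tolerating all but one zone per partition.
func queryAllPartitions(pir *ring.PartitionInstanceRing) error {
	sets, err := pir.GetReplicationSetsForOperation(ring.Read)
	if err != nil {
		return err // e.g. a partition with no healthy owners surfaces here
	}

	for _, set := range sets {
		// With healthy owners spread over N zones, MaxUnavailableZones is N-1:
		// a successful response from a single zone is enough.
		if err := queryPartitionOwners(set.Instances); err != nil {
			return err
		}
	}
	return nil
}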
+
+// ShuffleShard wraps PartitionRing.ShuffleShard().
+//
+// The PartitionRing embedded in the returned PartitionInstanceRing is based on a snapshot of the partitions ring
+// at the time this function gets called. This means that subsequent changes to the partitions ring will not
+// be reflected in the returned PartitionInstanceRing.
+func (r *PartitionInstanceRing) ShuffleShard(identifier string, size int) (*PartitionInstanceRing, error) {
+	partitionsSubring, err := r.PartitionRing().ShuffleShard(identifier, size)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewPartitionInstanceRing(newStaticPartitionRingReader(partitionsSubring), r.instancesRing, r.heartbeatTimeout), nil
+}
+
+// ShuffleShardWithLookback wraps PartitionRing.ShuffleShardWithLookback().
+//
+// The PartitionRing embedded in the returned PartitionInstanceRing is based on a snapshot of the partitions ring
+// at the time this function gets called. This means that subsequent changes to the partitions ring will not
+// be reflected in the returned PartitionInstanceRing.
+func (r *PartitionInstanceRing) ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) (*PartitionInstanceRing, error) {
+	partitionsSubring, err := r.PartitionRing().ShuffleShardWithLookback(identifier, size, lookbackPeriod, now)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewPartitionInstanceRing(newStaticPartitionRingReader(partitionsSubring), r.instancesRing, r.heartbeatTimeout), nil
+}
+
+type staticPartitionRingReader struct {
+	ring *PartitionRing
+}
+
+func newStaticPartitionRingReader(ring *PartitionRing) staticPartitionRingReader {
+	return staticPartitionRingReader{
+		ring: ring,
+	}
+}
+
+func (m staticPartitionRingReader) PartitionRing() *PartitionRing {
+	return m.ring
+}
+
+// uniqueZonesFromInstances returns the unique list of zones among the input instances. The input buf MUST have
+// zero length, but may have spare capacity in order to avoid memory allocations.
+func uniqueZonesFromInstances(instances []InstanceDesc, buf []string) []string {
+	for _, instance := range instances {
+		if !slices.Contains(buf, instance.Zone) {
+			buf = append(buf, instance.Zone)
+		}
+	}
+
+	return buf
+}
diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring.go b/vendor/github.com/grafana/dskit/ring/partition_ring.go
new file mode 100644
index 0000000000000..911de476c865f
--- /dev/null
+++ b/vendor/github.com/grafana/dskit/ring/partition_ring.go
@@ -0,0 +1,487 @@
+package ring
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"math/rand"
+	"strconv"
+	"time"
+
+	"golang.org/x/exp/slices"
+
+	shardUtil "github.com/grafana/dskit/ring/shard"
+)
+
+var ErrNoActivePartitionFound = fmt.Errorf("no active partition found")
+
+// PartitionRing holds an immutable view of the partitions ring.
+//
+// Design principles:
+//   - Immutable: the PartitionRingDesc held by PartitionRing is immutable. When the PartitionRingDesc changes,
+//     a new instance of PartitionRing should be created. The partitions ring is expected to change infrequently
+//     (e.g. there's no heartbeat), so creating a new PartitionRing each time the partitions ring changes is
+//     not expected to have a significant overhead.
+type PartitionRing struct {
+	// desc is a snapshot of the partition ring. This data is immutable and MUST NOT be modified.
+	desc PartitionRingDesc
+
+	// ringTokens is a sorted list of all tokens registered by all partitions.
+	ringTokens Tokens
+
+	// partitionByToken is a map where the key is a registered token and the value is the ID of the partition
+	// that registered that token.
+	partitionByToken map[Token]int32
+
+	// ownersByPartition is a map where the key is the partition ID and the value is a list of owner IDs.
+	ownersByPartition map[int32][]string
+
+	// shuffleShardCache is used to cache subrings generated with shuffle sharding.
+	shuffleShardCache *partitionRingShuffleShardCache
+
+	// activePartitionsCount is a saved count of active partitions to avoid recomputing it.
+	activePartitionsCount int
+}
+
+func NewPartitionRing(desc PartitionRingDesc) *PartitionRing {
+	return &PartitionRing{
+		desc:                  desc,
+		ringTokens:            desc.tokens(),
+		partitionByToken:      desc.partitionByToken(),
+		ownersByPartition:     desc.ownersByPartition(),
+		activePartitionsCount: desc.activePartitionsCount(),
+		shuffleShardCache:     newPartitionRingShuffleShardCache(),
+	}
+}
+
+// ActivePartitionForKey returns the partition for the given key. Only active partitions are considered.
+// Only one partition is returned: in other terms, the replication factor is always 1.
+func (r *PartitionRing) ActivePartitionForKey(key uint32) (int32, error) {
+	var (
+		start       = searchToken(r.ringTokens, key)
+		iterations  = 0
+		tokensCount = len(r.ringTokens)
+	)
+
+	for i := start; iterations < tokensCount; i++ {
+		iterations++
+
+		if i >= tokensCount {
+			i %= len(r.ringTokens)
+		}
+
+		token := r.ringTokens[i]
+
+		partitionID, ok := r.partitionByToken[Token(token)]
+		if !ok {
+			return 0, ErrInconsistentTokensInfo
+		}
+
+		partition, ok := r.desc.Partitions[partitionID]
+		if !ok {
+			return 0, ErrInconsistentTokensInfo
+		}
+
+		// If the partition is not active we'll keep walking the ring.
+		if partition.IsActive() {
+			return partitionID, nil
+		}
+	}
+
+	return 0, ErrNoActivePartitionFound
+}
+
+// ShuffleShardSize returns the number of partitions that would be in the result of a ShuffleShard call with the same size.
+func (r *PartitionRing) ShuffleShardSize(size int) int {
+	if size <= 0 || size >= r.activePartitionsCount {
+		return r.activePartitionsCount
+	}
+
+	return size
+}
+
+// ShuffleShard returns a subring for the provided identifier (e.g. a tenant ID)
+// and size (number of partitions).
+//
+// The algorithm used to build the subring is a shuffle sharder based on probabilistic
+// hashing. We pick N unique partitions, walking the ring starting from random but
+// predictable numbers. The random generator is initialised with a seed based on the
+// provided identifier.
+//
+// This function returns a subring containing ONLY ACTIVE partitions.
+//
+// This function supports caching.
+//
+// This implementation guarantees:
+//
+//   - Stability: given the same ring, two invocations return the same result.
+//
+//   - Consistency: adding/removing 1 partition from the ring generates a resulting
+//     subring with no more than 1 difference.
+//
+//   - Shuffling: probabilistically, for a large enough cluster each identifier gets a different
+//     set of partitions, with a reduced number of overlapping partitions between two identifiers.
+func (r *PartitionRing) ShuffleShard(identifier string, size int) (*PartitionRing, error) {
+	if cached := r.shuffleShardCache.getSubring(identifier, size); cached != nil {
+		return cached, nil
+	}
+
+	// No need to pass the time if there's no lookback.
+	subring, err := r.shuffleShard(identifier, size, 0, time.Time{})
+	if err != nil {
+		return nil, err
+	}
+
+	r.shuffleShardCache.setSubring(identifier, size, subring)
+	return subring, nil
+}
+
+// ShuffleShardWithLookback is like ShuffleShard() but the returned subring includes all partitions
+// that have been part of the identifier's shard in the [now - lookbackPeriod, now] time window.
+//
+// This function can return a mix of ACTIVE and INACTIVE partitions.
+// INACTIVE partitions are only included if they were part of the identifier's shard within the
+// lookbackPeriod. PENDING partitions are never returned.
+//
+// This function supports caching, but the cache will only be effective if successive calls for the
+// same identifier are with the same lookbackPeriod and increasing values of now.
+func (r *PartitionRing) ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) (*PartitionRing, error) {
+	if cached := r.shuffleShardCache.getSubringWithLookback(identifier, size, lookbackPeriod, now); cached != nil {
+		return cached, nil
+	}
+
+	subring, err := r.shuffleShard(identifier, size, lookbackPeriod, now)
+	if err != nil {
+		return nil, err
+	}
+
+	r.shuffleShardCache.setSubringWithLookback(identifier, size, lookbackPeriod, now, subring)
+	return subring, nil
+}
+
+func (r *PartitionRing) shuffleShard(identifier string, size int, lookbackPeriod time.Duration, now time.Time) (*PartitionRing, error) {
+	// If the size is too small or too large, run with a size equal to the total number of partitions.
+	// We have to run the function anyway because the logic may filter out some INACTIVE partitions.
+	if size <= 0 || size >= len(r.desc.Partitions) {
+		size = len(r.desc.Partitions)
+	}
+
+	var lookbackUntil int64
+	if lookbackPeriod > 0 {
+		lookbackUntil = now.Add(-lookbackPeriod).Unix()
+	}
+
+	// Initialise the random generator used to select partitions in the ring.
+	// Since there are no zones in the partitions ring, an empty zone is used to compute the seed.
+	random := rand.New(rand.NewSource(shardUtil.ShuffleShardSeed(identifier, "")))
+
+	// To select one more partition while guaranteeing the "consistency" property,
+	// we pick a random value from the generator and resolve uniqueness collisions
+	// (if any) by continuing to walk the ring.
+	tokensCount := len(r.ringTokens)
+
+	result := make(map[int32]struct{}, size)
+	exclude := map[int32]struct{}{}
+
+	for len(result) < size {
+		start := searchToken(r.ringTokens, random.Uint32())
+		iterations := 0
+		found := false
+
+		for p := start; !found && iterations < tokensCount; p++ {
+			iterations++
+
+			// Wrap p around in the ring.
+			if p >= tokensCount {
+				p %= tokensCount
+			}
+
+			pid, ok := r.partitionByToken[Token(r.ringTokens[p])]
+			if !ok {
+				return nil, ErrInconsistentTokensInfo
+			}
+
+			// Ensure the partition has not already been included or excluded.
+			if _, ok := result[pid]; ok {
+				continue
+			}
+			if _, ok := exclude[pid]; ok {
+				continue
+			}
+
+			partition, ok := r.desc.Partitions[pid]
+			if !ok {
+				return nil, ErrInconsistentTokensInfo
+			}
+
+			// PENDING partitions should be skipped because they're not ready for reads or writes yet,
+			// and they're not subject to the lookback either.
+			if partition.IsPending() {
+				exclude[pid] = struct{}{}
+				continue
+			}
+
+			var (
+				withinLookbackPeriod = lookbackPeriod > 0 && partition.GetStateTimestamp() >= lookbackUntil
+				shouldExtend         = withinLookbackPeriod
+				shouldInclude        = partition.IsActive() || withinLookbackPeriod
+			)
+
+			// Either include or exclude the found partition.
+			if shouldInclude {
+				result[pid] = struct{}{}
+			} else {
+				exclude[pid] = struct{}{}
+			}
+
+			// Extend the shard, if requested.
+			if shouldExtend {
+				size++
+			}
+
+			// We can stop searching for other partitions only if this partition was included
+			// and no extension was requested, which means it's the "stop partition" for this cycle.
+			if shouldInclude && !shouldExtend {
+				found = true
+			}
+		}
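+
+		// Worked example of the extension above (values hypothetical): with size=2, if the walk
+		// first hits a partition which turned INACTIVE within the lookback period, that partition
+		// is included AND size grows to 3, so the shard ends up with the INACTIVE partition plus
+		// 2 ACTIVE ones. Once the state change ages out of the lookback window, the INACTIVE
+		// partition is no longer included and the shard shrinks back to size 2.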
+
+		// If we iterated over all tokens and no new partition has been found, we can stop looking for more partitions.
+		if !found {
+			break
+		}
+	}
+
+	return NewPartitionRing(r.desc.WithPartitions(result)), nil
+}
+
+// PartitionsCount returns the number of partitions in the ring.
+func (r *PartitionRing) PartitionsCount() int {
+	return len(r.desc.Partitions)
+}
+
+// ActivePartitionsCount returns the number of active partitions in the ring.
+func (r *PartitionRing) ActivePartitionsCount() int {
+	return r.activePartitionsCount
+}
+
+// Partitions returns the partitions in the ring.
+// The returned slice is a deep copy, so the caller can freely manipulate it.
+func (r *PartitionRing) Partitions() []PartitionDesc {
+	res := make([]PartitionDesc, 0, len(r.desc.Partitions))
+
+	for _, partition := range r.desc.Partitions {
+		res = append(res, partition.Clone())
+	}
+
+	return res
+}
+
+// PartitionIDs returns a sorted list of all partition IDs in the ring.
+// The returned slice is a copy, so the caller can freely manipulate it.
+func (r *PartitionRing) PartitionIDs() []int32 {
+	ids := make([]int32, 0, len(r.desc.Partitions))
+
+	for id := range r.desc.Partitions {
+		ids = append(ids, id)
+	}
+
+	slices.Sort(ids)
+	return ids
+}
+
+// PendingPartitionIDs returns a sorted list of all PENDING partition IDs in the ring.
+// The returned slice is a copy, so the caller can freely manipulate it.
+func (r *PartitionRing) PendingPartitionIDs() []int32 {
+	ids := make([]int32, 0, len(r.desc.Partitions))
+
+	for id, partition := range r.desc.Partitions {
+		if partition.IsPending() {
+			ids = append(ids, id)
+		}
+	}
+
+	slices.Sort(ids)
+	return ids
+}
+
+// ActivePartitionIDs returns a sorted list of all ACTIVE partition IDs in the ring.
+// The returned slice is a copy, so the caller can freely manipulate it.
+func (r *PartitionRing) ActivePartitionIDs() []int32 {
+	ids := make([]int32, 0, len(r.desc.Partitions))
+
+	for id, partition := range r.desc.Partitions {
+		if partition.IsActive() {
+			ids = append(ids, id)
+		}
+	}
+
+	slices.Sort(ids)
+	return ids
+}
+
+// InactivePartitionIDs returns a sorted list of all INACTIVE partition IDs in the ring.
+// The returned slice is a copy, so the caller can freely manipulate it.
+func (r *PartitionRing) InactivePartitionIDs() []int32 {
+	ids := make([]int32, 0, len(r.desc.Partitions))
+
+	for id, partition := range r.desc.Partitions {
+		if partition.IsInactive() {
+			ids = append(ids, id)
+		}
+	}
+
+	slices.Sort(ids)
+	return ids
+}
+
+// PartitionOwnerIDs returns a list of owner IDs for the given partitionID.
+// The returned slice is NOT a copy and must never be modified by the caller.
+func (r *PartitionRing) PartitionOwnerIDs(partitionID int32) (doNotModify []string) {
+	return r.ownersByPartition[partitionID]
+}
+
+// PartitionOwnerIDsCopy is like PartitionOwnerIDs(), but the returned slice is a copy,
+// so the caller can freely manipulate it.
+func (r *PartitionRing) PartitionOwnerIDsCopy(partitionID int32) []string {
+	ids := r.ownersByPartition[partitionID]
+	if len(ids) == 0 {
+		return nil
+	}
+
+	return slices.Clone(ids)
+}
+
+func (r *PartitionRing) String() string {
+	buf := bytes.Buffer{}
+	for pid, pd := range r.desc.Partitions {
+		buf.WriteString(fmt.Sprintf(" %d:%v", pid, pd.State.String()))
+	}
+
+	return fmt.Sprintf("PartitionRing{ownersCount: %d, partitionsCount: %d, partitions: {%s}}", len(r.desc.Owners), len(r.desc.Partitions), buf.String())
+}
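A worked example of the range computation that follows (token values hypothetical): if the whole ring holds tokens {100, 200, 300} and a partition registered token 200, it owns the inclusive range [100, 199]; the partition holding the smallest token also picks up the wrap-around range ending at math.MaxUint32. A sketch of reading the ranges from outside the package; the tenant ID, shard size, and partition ID are illustrative:

// printOwnedRanges lists the token ranges owned by partition 1 within a tenant's shard.
func printOwnedRanges(fullRing *ring.PartitionRing) error {
	subring, err := fullRing.ShuffleShard("tenant-1", 4)
	if err != nil {
		return err
	}

	ranges, err := subring.GetTokenRangesForPartition(1)
	if err != nil {
		return err
	}

	// TokenRanges is a flat sequence of inclusive [start, end] pairs.
	for i := 0; i+1 < len(ranges); i += 2 {
		fmt.Printf("partition 1 owns [%d, %d]\n", ranges[i], ranges[i+1])
	}
	return nil
}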
+
+// GetTokenRangesForPartition returns the token ranges owned by the given partition. Note that this
+// method does NOT take partition state into account, so if only active partitions should be
+// considered, a PartitionRing containing only active partitions must be created first (e.g. using the ShuffleShard method).
+func (r *PartitionRing) GetTokenRangesForPartition(partitionID int32) (TokenRanges, error) {
+	partition, ok := r.desc.Partitions[partitionID]
+	if !ok {
+		return nil, ErrPartitionDoesNotExist
+	}
+
+	// 1 range (2 values) per token, plus one additional range if we need to split the rollover range.
+	ranges := make(TokenRanges, 0, 2*(len(partition.Tokens)+1))
+
+	addRange := func(start, end uint32) {
+		// Check if we can group ranges. If so, we just update the end of the previous range.
+		if len(ranges) > 0 && ranges[len(ranges)-1] == start-1 {
+			ranges[len(ranges)-1] = end
+		} else {
+			ranges = append(ranges, start, end)
+		}
+	}
+
+	// The "last" range is the range that includes token math.MaxUint32.
+	ownsLastRange := false
+	startOfLastRange := uint32(0)
+
+	// We start with all tokens, but will remove tokens we already skipped, to let binary search do less work.
+	ringTokens := r.ringTokens
+
+	for iter, t := range partition.Tokens {
+		lastOwnedToken := t - 1
+
+		ix := searchToken(ringTokens, lastOwnedToken)
+		prevIx := ix - 1
+
+		if prevIx < 0 {
+			// We can only find the "last" range during the first iteration.
+			if iter > 0 {
+				return nil, ErrInconsistentTokensInfo
+			}
+
+			prevIx = len(ringTokens) - 1
+			ownsLastRange = true
+
+			startOfLastRange = ringTokens[prevIx]
+
+			// We can only claim token 0 if our actual token in the ring (which is the exclusive end of the range) was not 0.
+			if t > 0 {
+				addRange(0, lastOwnedToken)
+			}
+		} else {
+			addRange(ringTokens[prevIx], lastOwnedToken)
+		}
+
+		// Reduce the number of tokens we need to search through. We keep the current token to serve as the min
+		// boundary for the next search, to make sure we don't find another "last" range (where prevIx < 0).
+		ringTokens = ringTokens[ix:]
+	}
+
+	if ownsLastRange {
+		addRange(startOfLastRange, math.MaxUint32)
+	}
+
+	return ranges, nil
+}
+
+// ActivePartitionBatchRing wraps PartitionRing and implements DoBatchRing to look up ACTIVE partitions.
+type ActivePartitionBatchRing struct {
+	ring *PartitionRing
+}
+
+func NewActivePartitionBatchRing(ring *PartitionRing) *ActivePartitionBatchRing {
+	return &ActivePartitionBatchRing{
+		ring: ring,
+	}
+}
+
+// InstancesCount returns the number of active partitions in the ring.
+//
+// InstancesCount implements DoBatchRing.InstancesCount.
+func (r *ActivePartitionBatchRing) InstancesCount() int {
+	return r.ring.ActivePartitionsCount()
+}
+
+// ReplicationFactor returns 1 as the partitions replication factor: an entry (looked up by key via Get())
+// is always stored in one and only one partition.
+//
+// ReplicationFactor implements DoBatchRing.ReplicationFactor.
+func (r *ActivePartitionBatchRing) ReplicationFactor() int {
+	return 1
+}
+
+// Get implements DoBatchRing.Get.
+func (r *ActivePartitionBatchRing) Get(key uint32, _ Operation, bufInstances []InstanceDesc, _, _ []string) (ReplicationSet, error) {
+	partitionID, err := r.ring.ActivePartitionForKey(key)
+	if err != nil {
+		return ReplicationSet{}, err
+	}
+
+	// Ensure we have enough capacity in bufInstances.
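+	// The partition is returned as a synthetic InstanceDesc: the Addr and Id fields below both
+	// carry the partition ID rendered as a string, since partitions have no real address.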
+ if cap(bufInstances) < 1 { + bufInstances = []InstanceDesc{{}} + } else { + bufInstances = bufInstances[:1] + } + + partitionIDString := strconv.Itoa(int(partitionID)) + + bufInstances[0] = InstanceDesc{ + Addr: partitionIDString, + Timestamp: 0, + State: ACTIVE, + Id: partitionIDString, + } + + return ReplicationSet{ + Instances: bufInstances, + MaxErrors: 0, + MaxUnavailableZones: 0, + ZoneAwarenessEnabled: false, + }, nil +} diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go new file mode 100644 index 0000000000000..8f47b1c562ea7 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.pb.go @@ -0,0 +1,1545 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: partition_ring_desc.proto + +package ring + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PartitionState int32 + +const ( + PartitionUnknown PartitionState = 0 + // Pending partition is a partition that is about to be switched to ACTIVE. This state is used + // to let owners to attach to the partition and get ready to handle the partition. + // + // When a partition is in this state, it must not be used for writing or reading. + PartitionPending PartitionState = 1 + // Active partition in read-write mode. + PartitionActive PartitionState = 2 + // Inactive partition in read-only mode. This partition will be deleted after a grace period, + // unless its state changes to Active again. + PartitionInactive PartitionState = 3 + // Deleted partition. This state is not visible to ring clients: it's only used to propagate + // via memberlist the information that a partition has been deleted. + PartitionDeleted PartitionState = 4 +) + +var PartitionState_name = map[int32]string{ + 0: "PartitionUnknown", + 1: "PartitionPending", + 2: "PartitionActive", + 3: "PartitionInactive", + 4: "PartitionDeleted", +} + +var PartitionState_value = map[string]int32{ + "PartitionUnknown": 0, + "PartitionPending": 1, + "PartitionActive": 2, + "PartitionInactive": 3, + "PartitionDeleted": 4, +} + +func (PartitionState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{0} +} + +type OwnerState int32 + +const ( + OwnerUnknown OwnerState = 0 + // Active owner. + OwnerActive OwnerState = 1 + // Deleted owner. This state is not visible to ring clients: it's only used to propagate + // via memberlist the information that a owner has been deleted. Owners in this state + // are removed before client can see them. 
+ OwnerDeleted OwnerState = 2 +) + +var OwnerState_name = map[int32]string{ + 0: "OwnerUnknown", + 1: "OwnerActive", + 2: "OwnerDeleted", +} + +var OwnerState_value = map[string]int32{ + "OwnerUnknown": 0, + "OwnerActive": 1, + "OwnerDeleted": 2, +} + +func (OwnerState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{1} +} + +// PartitionRingDesc holds the state of the partitions ring. +type PartitionRingDesc struct { + // Mapping between partition ID and partition info. + Partitions map[int32]PartitionDesc `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Mapping between instance ID and partition ownership info. + Owners map[string]OwnerDesc `protobuf:"bytes,2,rep,name=owners,proto3" json:"owners" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *PartitionRingDesc) Reset() { *m = PartitionRingDesc{} } +func (*PartitionRingDesc) ProtoMessage() {} +func (*PartitionRingDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{0} +} +func (m *PartitionRingDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartitionRingDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PartitionRingDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PartitionRingDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartitionRingDesc.Merge(m, src) +} +func (m *PartitionRingDesc) XXX_Size() int { + return m.Size() +} +func (m *PartitionRingDesc) XXX_DiscardUnknown() { + xxx_messageInfo_PartitionRingDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_PartitionRingDesc proto.InternalMessageInfo + +func (m *PartitionRingDesc) GetPartitions() map[int32]PartitionDesc { + if m != nil { + return m.Partitions + } + return nil +} + +func (m *PartitionRingDesc) GetOwners() map[string]OwnerDesc { + if m != nil { + return m.Owners + } + return nil +} + +// PartitionDesc holds the state of a single partition. +type PartitionDesc struct { + // The partition ID. This value is the same as the key in the partitions map in PartitionRingDesc. + Id int32 `protobuf:"varint,4,opt,name=id,proto3" json:"id,omitempty"` + // Unique tokens, generated with deterministic token generator. Tokens MUST be immutable: + // if tokens get changed, the change will not be propagated via memberlist. + Tokens []uint32 `protobuf:"varint,1,rep,packed,name=tokens,proto3" json:"tokens,omitempty"` + // The state of the partition. + State PartitionState `protobuf:"varint,2,opt,name=state,proto3,enum=ring.PartitionState" json:"state,omitempty"` + // Unix timestamp (with seconds precision) of when has the state changed last time for this partition. 
+ StateTimestamp int64 `protobuf:"varint,3,opt,name=stateTimestamp,proto3" json:"stateTimestamp,omitempty"` +} + +func (m *PartitionDesc) Reset() { *m = PartitionDesc{} } +func (*PartitionDesc) ProtoMessage() {} +func (*PartitionDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{1} +} +func (m *PartitionDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartitionDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PartitionDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PartitionDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartitionDesc.Merge(m, src) +} +func (m *PartitionDesc) XXX_Size() int { + return m.Size() +} +func (m *PartitionDesc) XXX_DiscardUnknown() { + xxx_messageInfo_PartitionDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_PartitionDesc proto.InternalMessageInfo + +func (m *PartitionDesc) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *PartitionDesc) GetTokens() []uint32 { + if m != nil { + return m.Tokens + } + return nil +} + +func (m *PartitionDesc) GetState() PartitionState { + if m != nil { + return m.State + } + return PartitionUnknown +} + +func (m *PartitionDesc) GetStateTimestamp() int64 { + if m != nil { + return m.StateTimestamp + } + return 0 +} + +// OwnerDesc holds the information of a partition owner. +type OwnerDesc struct { + // Partition that belongs to this owner. A owner can own only 1 partition, but 1 partition can be + // owned by multiple owners. + OwnedPartition int32 `protobuf:"varint,1,opt,name=ownedPartition,proto3" json:"ownedPartition,omitempty"` + // The owner state. This field is used to propagate deletions via memberlist. + State OwnerState `protobuf:"varint,2,opt,name=state,proto3,enum=ring.OwnerState" json:"state,omitempty"` + // Unix timestamp (with seconds precision) of when the data for the owner has been updated the last time. + // This timestamp is used to resolve conflicts when merging updates via memberlist (the most recent + // update wins). 
+ UpdatedTimestamp int64 `protobuf:"varint,3,opt,name=updatedTimestamp,proto3" json:"updatedTimestamp,omitempty"` +} + +func (m *OwnerDesc) Reset() { *m = OwnerDesc{} } +func (*OwnerDesc) ProtoMessage() {} +func (*OwnerDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_4df2762174d93dc4, []int{2} +} +func (m *OwnerDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OwnerDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OwnerDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OwnerDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_OwnerDesc.Merge(m, src) +} +func (m *OwnerDesc) XXX_Size() int { + return m.Size() +} +func (m *OwnerDesc) XXX_DiscardUnknown() { + xxx_messageInfo_OwnerDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_OwnerDesc proto.InternalMessageInfo + +func (m *OwnerDesc) GetOwnedPartition() int32 { + if m != nil { + return m.OwnedPartition + } + return 0 +} + +func (m *OwnerDesc) GetState() OwnerState { + if m != nil { + return m.State + } + return OwnerUnknown +} + +func (m *OwnerDesc) GetUpdatedTimestamp() int64 { + if m != nil { + return m.UpdatedTimestamp + } + return 0 +} + +func init() { + proto.RegisterEnum("ring.PartitionState", PartitionState_name, PartitionState_value) + proto.RegisterEnum("ring.OwnerState", OwnerState_name, OwnerState_value) + proto.RegisterType((*PartitionRingDesc)(nil), "ring.PartitionRingDesc") + proto.RegisterMapType((map[string]OwnerDesc)(nil), "ring.PartitionRingDesc.OwnersEntry") + proto.RegisterMapType((map[int32]PartitionDesc)(nil), "ring.PartitionRingDesc.PartitionsEntry") + proto.RegisterType((*PartitionDesc)(nil), "ring.PartitionDesc") + proto.RegisterType((*OwnerDesc)(nil), "ring.OwnerDesc") +} + +func init() { proto.RegisterFile("partition_ring_desc.proto", fileDescriptor_4df2762174d93dc4) } + +var fileDescriptor_4df2762174d93dc4 = []byte{ + // 497 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xc7, 0x7d, 0x76, 0x12, 0xa9, 0x2f, 0x34, 0x39, 0xae, 0x05, 0x99, 0x0c, 0x47, 0x14, 0x44, + 0x09, 0x91, 0x48, 0xa5, 0xc0, 0x80, 0xd8, 0x52, 0x95, 0x01, 0x24, 0x44, 0x65, 0x60, 0xae, 0x9c, + 0xf8, 0x30, 0xa7, 0x34, 0x77, 0x91, 0x7d, 0x6e, 0xd5, 0x05, 0xb1, 0x31, 0xb0, 0xf0, 0x31, 0xf8, + 0x22, 0x48, 0x1d, 0x33, 0x76, 0x42, 0xc4, 0x59, 0x18, 0xfb, 0x11, 0x90, 0xcf, 0xae, 0x63, 0xbb, + 0xea, 0x76, 0xef, 0x7f, 0xef, 0xfd, 0xfe, 0xff, 0x3b, 0x9f, 0xe1, 0xc1, 0xc2, 0x0d, 0x14, 0x57, + 0x5c, 0x8a, 0xe3, 0x80, 0x0b, 0xff, 0xd8, 0x63, 0xe1, 0x74, 0xb8, 0x08, 0xa4, 0x92, 0xa4, 0x96, + 0x08, 0x9d, 0x67, 0x3e, 0x57, 0x5f, 0xa2, 0xc9, 0x70, 0x2a, 0xe7, 0xfb, 0xbe, 0xf4, 0xe5, 0xbe, + 0xde, 0x9c, 0x44, 0x9f, 0x75, 0xa5, 0x0b, 0xbd, 0x4a, 0x87, 0x7a, 0xbf, 0x4d, 0xb8, 0x7b, 0x74, + 0x8d, 0x74, 0xb8, 0xf0, 0x0f, 0x59, 0x38, 0x25, 0xef, 0x00, 0x72, 0x9f, 0xd0, 0x46, 0x5d, 0xab, + 0xdf, 0x1c, 0x3d, 0x19, 0x26, 0xfc, 0xe1, 0x8d, 0xe6, 0x8d, 0x12, 0xbe, 0x16, 0x2a, 0x38, 0x3f, + 0xa8, 0x5d, 0xfc, 0x79, 0x68, 0x38, 0x05, 0x00, 0x19, 0x43, 0x43, 0x9e, 0x09, 0x16, 0x84, 0xb6, + 0xa9, 0x51, 0x8f, 0x6e, 0x43, 0xbd, 0xd7, 0x5d, 0x45, 0x4c, 0x36, 0xd8, 0x71, 0xa0, 0x5d, 0xf1, + 0x21, 0x18, 0xac, 0x19, 0x3b, 0xb7, 0x51, 0x17, 0xf5, 0xeb, 0x4e, 0xb2, 0x24, 0x4f, 0xa1, 0x7e, + 0xea, 0x9e, 0x44, 0xcc, 0x36, 0xbb, 0xa8, 0xdf, 0x1c, 0xed, 0x54, 0x6c, 0x12, 
0x0b, 0x27, 0xed, + 0x78, 0x65, 0xbe, 0x44, 0x9d, 0xb7, 0xd0, 0x2c, 0x18, 0x16, 0x79, 0x5b, 0x29, 0xef, 0x71, 0x99, + 0xd7, 0x4e, 0x79, 0x7a, 0xa6, 0xc2, 0xea, 0xfd, 0x40, 0xb0, 0x5d, 0x32, 0x22, 0x2d, 0x30, 0xb9, + 0x67, 0xd7, 0x74, 0x3a, 0x93, 0x7b, 0xe4, 0x3e, 0x34, 0x94, 0x9c, 0xb1, 0xec, 0x3e, 0xb7, 0x9d, + 0xac, 0x22, 0x03, 0xa8, 0x87, 0xca, 0x55, 0xa9, 0x49, 0x6b, 0xb4, 0x5b, 0x09, 0xfd, 0x21, 0xd9, + 0x73, 0xd2, 0x16, 0xb2, 0x07, 0x2d, 0xbd, 0xf8, 0xc8, 0xe7, 0x2c, 0x54, 0xee, 0x7c, 0x61, 0x5b, + 0x5d, 0xd4, 0xb7, 0x9c, 0x8a, 0xda, 0xfb, 0x8e, 0x60, 0x2b, 0x8f, 0x99, 0x4c, 0x25, 0xb7, 0xe8, + 0xe5, 0xcc, 0xec, 0xce, 0x2a, 0x2a, 0xd9, 0x2b, 0x27, 0xc1, 0x85, 0xe3, 0x96, 0x52, 0x0c, 0x00, + 0x47, 0x0b, 0xcf, 0x55, 0xcc, 0xab, 0xe6, 0xb8, 0xa1, 0x0f, 0xbe, 0x42, 0xab, 0x7c, 0x14, 0xb2, + 0x0b, 0x38, 0x57, 0x3e, 0x89, 0x99, 0x90, 0x67, 0x02, 0x1b, 0x25, 0xf5, 0x88, 0x09, 0x8f, 0x0b, + 0x1f, 0x23, 0xb2, 0x53, 0xf8, 0xea, 0xe3, 0xa9, 0xe2, 0xa7, 0x0c, 0x9b, 0xe4, 0x5e, 0xe1, 0xc5, + 0xbe, 0x11, 0x6e, 0x2a, 0x5b, 0x25, 0xc2, 0x21, 0x3b, 0x61, 0x8a, 0x79, 0xb8, 0x36, 0x18, 0x03, + 0x6c, 0x0e, 0x40, 0x30, 0xdc, 0xd1, 0xd5, 0xc6, 0xb7, 0x9d, 0xbd, 0x81, 0x8c, 0x8e, 0xf2, 0x96, + 0x6b, 0x84, 0x79, 0xf0, 0x62, 0xb9, 0xa2, 0xc6, 0xe5, 0x8a, 0x1a, 0x57, 0x2b, 0x8a, 0xbe, 0xc5, + 0x14, 0xfd, 0x8a, 0x29, 0xba, 0x88, 0x29, 0x5a, 0xc6, 0x14, 0xfd, 0x8d, 0x29, 0xfa, 0x17, 0x53, + 0xe3, 0x2a, 0xa6, 0xe8, 0xe7, 0x9a, 0x1a, 0xcb, 0x35, 0x35, 0x2e, 0xd7, 0xd4, 0x98, 0x34, 0xf4, + 0xff, 0xf5, 0xfc, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xd1, 0xa7, 0xbd, 0xb1, 0x03, 0x00, + 0x00, +} + +func (x PartitionState) String() string { + s, ok := PartitionState_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x OwnerState) String() string { + s, ok := OwnerState_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *PartitionRingDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PartitionRingDesc) + if !ok { + that2, ok := that.(PartitionRingDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Partitions) != len(that1.Partitions) { + return false + } + for i := range this.Partitions { + a := this.Partitions[i] + b := that1.Partitions[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Owners) != len(that1.Owners) { + return false + } + for i := range this.Owners { + a := this.Owners[i] + b := that1.Owners[i] + if !(&a).Equal(&b) { + return false + } + } + return true +} +func (this *PartitionDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PartitionDesc) + if !ok { + that2, ok := that.(PartitionDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Id != that1.Id { + return false + } + if len(this.Tokens) != len(that1.Tokens) { + return false + } + for i := range this.Tokens { + if this.Tokens[i] != that1.Tokens[i] { + return false + } + } + if this.State != that1.State { + return false + } + if this.StateTimestamp != that1.StateTimestamp { + return false + } + return true +} +func (this *OwnerDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OwnerDesc) + if !ok { + that2, ok := that.(OwnerDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if 
that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.OwnedPartition != that1.OwnedPartition { + return false + } + if this.State != that1.State { + return false + } + if this.UpdatedTimestamp != that1.UpdatedTimestamp { + return false + } + return true +} +func (this *PartitionRingDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&ring.PartitionRingDesc{") + keysForPartitions := make([]int32, 0, len(this.Partitions)) + for k, _ := range this.Partitions { + keysForPartitions = append(keysForPartitions, k) + } + github_com_gogo_protobuf_sortkeys.Int32s(keysForPartitions) + mapStringForPartitions := "map[int32]PartitionDesc{" + for _, k := range keysForPartitions { + mapStringForPartitions += fmt.Sprintf("%#v: %#v,", k, this.Partitions[k]) + } + mapStringForPartitions += "}" + if this.Partitions != nil { + s = append(s, "Partitions: "+mapStringForPartitions+",\n") + } + keysForOwners := make([]string, 0, len(this.Owners)) + for k, _ := range this.Owners { + keysForOwners = append(keysForOwners, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOwners) + mapStringForOwners := "map[string]OwnerDesc{" + for _, k := range keysForOwners { + mapStringForOwners += fmt.Sprintf("%#v: %#v,", k, this.Owners[k]) + } + mapStringForOwners += "}" + if this.Owners != nil { + s = append(s, "Owners: "+mapStringForOwners+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PartitionDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&ring.PartitionDesc{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "Tokens: "+fmt.Sprintf("%#v", this.Tokens)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "StateTimestamp: "+fmt.Sprintf("%#v", this.StateTimestamp)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OwnerDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&ring.OwnerDesc{") + s = append(s, "OwnedPartition: "+fmt.Sprintf("%#v", this.OwnedPartition)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "UpdatedTimestamp: "+fmt.Sprintf("%#v", this.UpdatedTimestamp)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringPartitionRingDesc(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *PartitionRingDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartitionRingDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartitionRingDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owners) > 0 { + for k := range m.Owners { + v := m.Owners[k] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = 
encodeVarintPartitionRingDesc(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Partitions) > 0 { + for k := range m.Partitions { + v := m.Partitions[k] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(k)) + i-- + dAtA[i] = 0x8 + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PartitionDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartitionDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartitionDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x20 + } + if m.StateTimestamp != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.StateTimestamp)) + i-- + dAtA[i] = 0x18 + } + if m.State != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if len(m.Tokens) > 0 { + dAtA4 := make([]byte, len(m.Tokens)*10) + var j3 int + for _, num := range m.Tokens { + for num >= 1<<7 { + dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA4[j3] = uint8(num) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA4[:j3]) + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(j3)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OwnerDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OwnerDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OwnerDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UpdatedTimestamp != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.UpdatedTimestamp)) + i-- + dAtA[i] = 0x18 + } + if m.State != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if m.OwnedPartition != 0 { + i = encodeVarintPartitionRingDesc(dAtA, i, uint64(m.OwnedPartition)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPartitionRingDesc(dAtA []byte, offset int, v uint64) int { + offset -= sovPartitionRingDesc(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PartitionRingDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Partitions) > 0 { + for k, v := range m.Partitions { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + sovPartitionRingDesc(uint64(k)) + 1 + l + sovPartitionRingDesc(uint64(l)) + n += mapEntrySize + 1 + sovPartitionRingDesc(uint64(mapEntrySize)) + } + } + if len(m.Owners) > 0 { + for k, v := range m.Owners { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovPartitionRingDesc(uint64(len(k))) + 1 + l + sovPartitionRingDesc(uint64(l)) + n += mapEntrySize + 1 + sovPartitionRingDesc(uint64(mapEntrySize)) + } + } + return n +} 
+ +func (m *PartitionDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tokens) > 0 { + l = 0 + for _, e := range m.Tokens { + l += sovPartitionRingDesc(uint64(e)) + } + n += 1 + sovPartitionRingDesc(uint64(l)) + l + } + if m.State != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.State)) + } + if m.StateTimestamp != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.StateTimestamp)) + } + if m.Id != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.Id)) + } + return n +} + +func (m *OwnerDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OwnedPartition != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.OwnedPartition)) + } + if m.State != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.State)) + } + if m.UpdatedTimestamp != 0 { + n += 1 + sovPartitionRingDesc(uint64(m.UpdatedTimestamp)) + } + return n +} + +func sovPartitionRingDesc(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPartitionRingDesc(x uint64) (n int) { + return sovPartitionRingDesc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PartitionRingDesc) String() string { + if this == nil { + return "nil" + } + keysForPartitions := make([]int32, 0, len(this.Partitions)) + for k, _ := range this.Partitions { + keysForPartitions = append(keysForPartitions, k) + } + github_com_gogo_protobuf_sortkeys.Int32s(keysForPartitions) + mapStringForPartitions := "map[int32]PartitionDesc{" + for _, k := range keysForPartitions { + mapStringForPartitions += fmt.Sprintf("%v: %v,", k, this.Partitions[k]) + } + mapStringForPartitions += "}" + keysForOwners := make([]string, 0, len(this.Owners)) + for k, _ := range this.Owners { + keysForOwners = append(keysForOwners, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOwners) + mapStringForOwners := "map[string]OwnerDesc{" + for _, k := range keysForOwners { + mapStringForOwners += fmt.Sprintf("%v: %v,", k, this.Owners[k]) + } + mapStringForOwners += "}" + s := strings.Join([]string{`&PartitionRingDesc{`, + `Partitions:` + mapStringForPartitions + `,`, + `Owners:` + mapStringForOwners + `,`, + `}`, + }, "") + return s +} +func (this *PartitionDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartitionDesc{`, + `Tokens:` + fmt.Sprintf("%v", this.Tokens) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `StateTimestamp:` + fmt.Sprintf("%v", this.StateTimestamp) + `,`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `}`, + }, "") + return s +} +func (this *OwnerDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OwnerDesc{`, + `OwnedPartition:` + fmt.Sprintf("%v", this.OwnedPartition) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `UpdatedTimestamp:` + fmt.Sprintf("%v", this.UpdatedTimestamp) + `,`, + `}`, + }, "") + return s +} +func valueToStringPartitionRingDesc(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PartitionRingDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: PartitionRingDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartitionRingDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Partitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Partitions == nil { + m.Partitions = make(map[int32]PartitionDesc) + } + var mapkey int32 + mapvalue := &PartitionDesc{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapkey |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &PartitionDesc{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Partitions[mapkey] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owners", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Owners == nil { + m.Owners = make(map[string]OwnerDesc) + } + var mapkey string + mapvalue := &OwnerDesc{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthPartitionRingDesc + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &OwnerDesc{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Owners[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartitionDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartitionDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartitionDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Tokens = append(m.Tokens, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { 
+ return ErrInvalidLengthPartitionRingDesc + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Tokens) == 0 { + m.Tokens = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Tokens = append(m.Tokens, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Tokens", wireType) + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= PartitionState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StateTimestamp", wireType) + } + m.StateTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StateTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnedPartition", wireType) + } + m.OwnedPartition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OwnedPartition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= OwnerState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedTimestamp", wireType) + } + m.UpdatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPartitionRingDesc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPartitionRingDesc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPartitionRingDesc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPartitionRingDesc + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthPartitionRingDesc + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPartitionRingDesc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPartitionRingDesc(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthPartitionRingDesc + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPartitionRingDesc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPartitionRingDesc = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto new file mode 100644 index 0000000000000..d8fb9316f01db --- /dev/null +++ 
b/vendor/github.com/grafana/dskit/ring/partition_ring_desc.proto @@ -0,0 +1,81 @@ +syntax = "proto3"; + +package ring; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// PartitionRingDesc holds the state of the partitions ring. +message PartitionRingDesc { + // Mapping between partition ID and partition info. + map<int32, PartitionDesc> partitions = 1 [(gogoproto.nullable) = false]; + + // Mapping between instance ID and partition ownership info. + map<string, OwnerDesc> owners = 2 [(gogoproto.nullable) = false]; +} + +// PartitionDesc holds the state of a single partition. +message PartitionDesc { + // The partition ID. This value is the same as the key in the partitions map in PartitionRingDesc. + int32 id = 4; + + // Unique tokens, generated with a deterministic token generator. Tokens MUST be immutable: + // if tokens get changed, the change will not be propagated via memberlist. + repeated uint32 tokens = 1; + + // The state of the partition. + PartitionState state = 2; + + // Unix timestamp (with seconds precision) of when the state last changed for this partition. + int64 stateTimestamp = 3; +} + +enum PartitionState { + PartitionUnknown = 0; + + // Pending partition is a partition that is about to be switched to ACTIVE. This state is used + // to let owners attach to the partition and get ready to handle the partition. + // + // When a partition is in this state, it must not be used for writing or reading. + PartitionPending = 1; + + // Active partition in read-write mode. + PartitionActive = 2; + + // Inactive partition in read-only mode. This partition will be deleted after a grace period, + // unless its state changes to Active again. + PartitionInactive = 3; + + // Deleted partition. This state is not visible to ring clients: it's only used to propagate + // via memberlist the information that a partition has been deleted. + PartitionDeleted = 4; +} + +// OwnerDesc holds the information of a partition owner. +message OwnerDesc { + // Partition that belongs to this owner. An owner can own only 1 partition, but 1 partition can be + // owned by multiple owners. + int32 ownedPartition = 1; + + // The owner state. This field is used to propagate deletions via memberlist. + OwnerState state = 2; + + // Unix timestamp (with seconds precision) of when the owner's data was last updated. + // This timestamp is used to resolve conflicts when merging updates via memberlist (the most recent + // update wins). + int64 updatedTimestamp = 3; +} + +enum OwnerState { + OwnerUnknown = 0; + + // Active owner. + OwnerActive = 1; + + // Deleted owner. This state is not visible to ring clients: it's only used to propagate + // via memberlist the information that an owner has been deleted. Owners in this state + // are removed before clients can see them. + OwnerDeleted = 2; +} diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go b/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go new file mode 100644 index 0000000000000..a816693e55caf --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_editor.go @@ -0,0 +1,64 @@ +package ring + +import ( + "context" + "time" + + "github.com/pkg/errors" + + "github.com/grafana/dskit/kv" +) + +// PartitionRingEditor is a standalone component that can be used to modify the partitions ring. +// If you want to implement the partition lifecycle you should use PartitionInstanceLifecycler instead.
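The .proto above effectively defines a small state machine (Pending, Active, Inactive, Deleted). A minimal sketch of driving one partition through it, using only helpers introduced later in this diff (NewPartitionRingDesc, AddPartition, UpdatePartitionState, CleanName); the partition ID and timestamps are made up, and this sketch is an illustration, not part of the change itself:

package main

import (
	"fmt"
	"time"

	"github.com/grafana/dskit/ring"
)

func main() {
	desc := ring.NewPartitionRingDesc()

	// New partitions start as PENDING so owners can attach before any traffic is served.
	desc.AddPartition(1, ring.PartitionPending, time.Now())

	// Once owners are ready, the partition becomes ACTIVE (read-write).
	desc.UpdatePartitionState(1, ring.PartitionActive, time.Now())

	// On scale-down the partition becomes INACTIVE (read-only) before eventual deletion.
	desc.UpdatePartitionState(1, ring.PartitionInactive, time.Now())

	fmt.Println(desc.Partitions[1].State.CleanName()) // Prints "Inactive".
}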
+type PartitionRingEditor struct { + ringKey string + store kv.Client +} + +func NewPartitionRingEditor(ringKey string, store kv.Client) *PartitionRingEditor { + return &PartitionRingEditor{ + ringKey: ringKey, + store: store, + } +} + +// ChangePartitionState changes the partition state to toState. +// This function returns ErrPartitionDoesNotExist if the partition doesn't exist, +// and ErrPartitionStateChangeNotAllowed if the state change is not allowed. +func (l *PartitionRingEditor) ChangePartitionState(ctx context.Context, partitionID int32, toState PartitionState) error { + return l.updateRing(ctx, func(ring *PartitionRingDesc) (bool, error) { + return changePartitionState(ring, partitionID, toState) + }) +} + +func (l *PartitionRingEditor) updateRing(ctx context.Context, update func(ring *PartitionRingDesc) (bool, error)) error { + return l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + ringDesc := GetOrCreatePartitionRingDesc(in) + + if changed, err := update(ringDesc); err != nil { + return nil, false, err + } else if !changed { + return nil, false, nil + } + + return ringDesc, true, nil + }) +} + +func changePartitionState(ring *PartitionRingDesc, partitionID int32, toState PartitionState) (changed bool, _ error) { + partition, exists := ring.Partitions[partitionID] + if !exists { + return false, ErrPartitionDoesNotExist + } + + if partition.State == toState { + return false, nil + } + + if !isPartitionStateChangeAllowed(partition.State, toState) { + return false, errors.Wrapf(ErrPartitionStateChangeNotAllowed, "change partition state from %s to %s", partition.State.CleanName(), toState.CleanName()) + } + + return ring.UpdatePartitionState(partitionID, toState, time.Now()), nil +} diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_http.go b/vendor/github.com/grafana/dskit/ring/partition_ring_http.go new file mode 100644 index 0000000000000..8e58c58c7afc8 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_http.go @@ -0,0 +1,158 @@ +package ring + +import ( + "context" + _ "embed" + "fmt" + "html/template" + "net/http" + "sort" + "strconv" + "time" + + "golang.org/x/exp/slices" +) + +//go:embed partition_ring_status.gohtml +var partitionRingPageContent string +var partitionRingPageTemplate = template.Must(template.New("webpage").Funcs(template.FuncMap{ + "mod": func(i, j int32) bool { + return i%j == 0 + }, + "formatTimestamp": func(ts time.Time) string { + return ts.Format("2006-01-02 15:04:05 MST") + }, +}).Parse(partitionRingPageContent)) + +type PartitionRingUpdater interface { + ChangePartitionState(ctx context.Context, partitionID int32, toState PartitionState) error +} + +type PartitionRingPageHandler struct { + reader PartitionRingReader + updater PartitionRingUpdater +} + +func NewPartitionRingPageHandler(reader PartitionRingReader, updater PartitionRingUpdater) *PartitionRingPageHandler { + return &PartitionRingPageHandler{ + reader: reader, + updater: updater, + } +} + +func (h *PartitionRingPageHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + switch req.Method { + case http.MethodGet: + h.handleGetRequest(w, req) + case http.MethodPost: + h.handlePostRequest(w, req) + default: + http.Error(w, "Unsupported HTTP method", http.StatusMethodNotAllowed) + } +} + +func (h *PartitionRingPageHandler) handleGetRequest(w http.ResponseWriter, req *http.Request) { + var ( + ring = h.reader.PartitionRing() + ringDesc = ring.desc + ) + + // Prepare the data to render partitions in the 
page. + partitionsByID := make(map[int32]partitionPageData, len(ringDesc.Partitions)) + for id, partition := range ringDesc.Partitions { + owners := ring.PartitionOwnerIDsCopy(id) + slices.Sort(owners) + + partitionsByID[id] = partitionPageData{ + ID: id, + Corrupted: false, + State: partition.State, + StateTimestamp: partition.GetStateTime(), + OwnerIDs: owners, + } + } + + // Look for owners of non-existing partitions. We want to provide visibility for such a case + // and we report the partition as corrupted. + for ownerID, owner := range ringDesc.Owners { + partition, exists := partitionsByID[owner.OwnedPartition] + + if !exists { + partition = partitionPageData{ + ID: owner.OwnedPartition, + Corrupted: true, + State: PartitionUnknown, + StateTimestamp: time.Time{}, + OwnerIDs: []string{ownerID}, + } + + partitionsByID[owner.OwnedPartition] = partition + } + + if !slices.Contains(partition.OwnerIDs, ownerID) { + partition.OwnerIDs = append(partition.OwnerIDs, ownerID) + partitionsByID[owner.OwnedPartition] = partition + } + } + + // Convert partitions to a list and sort it by ID. + partitions := make([]partitionPageData, 0, len(partitionsByID)) + + for _, partition := range partitionsByID { + partitions = append(partitions, partition) + } + + sort.Slice(partitions, func(i, j int) bool { + return partitions[i].ID < partitions[j].ID + }) + + renderHTTPResponse(w, partitionRingPageData{ + Partitions: partitions, + PartitionStateChanges: map[PartitionState]PartitionState{ + PartitionPending: PartitionActive, + PartitionActive: PartitionInactive, + PartitionInactive: PartitionActive, + }, + }, partitionRingPageTemplate, req) +} + +func (h *PartitionRingPageHandler) handlePostRequest(w http.ResponseWriter, req *http.Request) { + if req.FormValue("action") == "change_state" { + partitionID, err := strconv.Atoi(req.FormValue("partition_id")) + if err != nil { + http.Error(w, fmt.Sprintf("invalid partition ID: %s", err.Error()), http.StatusBadRequest) + return + } + + toState, ok := PartitionState_value[req.FormValue("partition_state")] + if !ok { + http.Error(w, "invalid partition state", http.StatusBadRequest) + return + } + + if err := h.updater.ChangePartitionState(req.Context(), int32(partitionID), PartitionState(toState)); err != nil { + http.Error(w, fmt.Sprintf("failed to change partition state: %s", err.Error()), http.StatusBadRequest) + return + } + } + + // Implement PRG pattern to prevent double-POST and work with CSRF middleware. + // https://en.wikipedia.org/wiki/Post/Redirect/Get + w.Header().Set("Location", "#") + w.WriteHeader(http.StatusFound) +} + +type partitionRingPageData struct { + Partitions []partitionPageData `json:"partitions"` + + // PartitionStateChanges maps the allowed state changes through the UI.
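+ // Only the transitions built in handleGetRequest above are offered
+ // (Pending -> Active, Active -> Inactive, Inactive -> Active); corrupted
+ // partitions and states with no entry in this map get no action button.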
+ PartitionStateChanges map[PartitionState]PartitionState `json:"-"` +} + +type partitionPageData struct { + ID int32 `json:"id"` + Corrupted bool `json:"corrupted"` + State PartitionState `json:"state"` + StateTimestamp time.Time `json:"state_timestamp"` + OwnerIDs []string `json:"owner_ids"` +} diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_model.go b/vendor/github.com/grafana/dskit/ring/partition_ring_model.go new file mode 100644 index 0000000000000..c95380756a3c5 --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_model.go @@ -0,0 +1,460 @@ +package ring + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "golang.org/x/exp/slices" + + "github.com/grafana/dskit/kv/codec" + "github.com/grafana/dskit/kv/memberlist" +) + +type partitionRingCodec struct { + codec.Codec +} + +// Decode wraps Codec.Decode and ensures PartitionRingDesc maps are not nil. +func (c *partitionRingCodec) Decode(in []byte) (interface{}, error) { + out, err := c.Codec.Decode(in) + if err != nil { + return out, err + } + + // Ensure maps are initialised. This makes working with PartitionRingDesc more convenient. + if actual, ok := out.(*PartitionRingDesc); ok { + if actual.Partitions == nil { + actual.Partitions = map[int32]PartitionDesc{} + } + if actual.Owners == nil { + actual.Owners = map[string]OwnerDesc{} + } + } + + return out, nil +} + +func GetPartitionRingCodec() codec.Codec { + return &partitionRingCodec{ + Codec: codec.NewProtoCodec("partitionRingDesc", PartitionRingDescFactory), + } +} + +// PartitionRingDescFactory makes new PartitionRingDesc. +func PartitionRingDescFactory() proto.Message { + return NewPartitionRingDesc() +} + +func GetOrCreatePartitionRingDesc(in any) *PartitionRingDesc { + if in == nil { + return NewPartitionRingDesc() + } + + desc := in.(*PartitionRingDesc) + if desc == nil { + return NewPartitionRingDesc() + } + + return desc +} + +func NewPartitionRingDesc() *PartitionRingDesc { + return &PartitionRingDesc{ + Partitions: map[int32]PartitionDesc{}, + Owners: map[string]OwnerDesc{}, + } +} + +// tokens returns a sorted list of tokens registered by all partitions. +func (m *PartitionRingDesc) tokens() Tokens { + allTokens := make(Tokens, 0, len(m.Partitions)*optimalTokensPerInstance) + + for _, partition := range m.Partitions { + allTokens = append(allTokens, partition.Tokens...) + } + + slices.Sort(allTokens) + return allTokens +} + +// partitionByToken returns a map where the key is a registered token and the value is the ID of the partition +// that registered that token. +func (m *PartitionRingDesc) partitionByToken() map[Token]int32 { + out := make(map[Token]int32, len(m.Partitions)*optimalTokensPerInstance) + + for partitionID, partition := range m.Partitions { + for _, token := range partition.Tokens { + out[Token(token)] = partitionID + } + } + + return out +} + +// ownersByPartition returns a map where the key is the partition ID and the value is a list of owner IDs. +func (m *PartitionRingDesc) ownersByPartition() map[int32][]string { + out := make(map[int32][]string, len(m.Partitions)) + for id, o := range m.Owners { + out[o.OwnedPartition] = append(out[o.OwnedPartition], id) + } + + // Sort owners to have predictable tests. + for id := range out { + slices.Sort(out[id]) + } + + return out +} + +// countPartitionsByState returns a map containing the number of partitions by state.
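+// States that are not visible to ring clients (PartitionUnknown and PartitionDeleted) are
+// excluded, and every remaining state is pre-initialised to 0 so it always appears in the result.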
+func (m *PartitionRingDesc) countPartitionsByState() map[PartitionState]int { + // Init the map to have zero values for all states. + out := make(map[PartitionState]int, len(PartitionState_value)-2) + for _, state := range PartitionState_value { + if PartitionState(state) == PartitionUnknown || PartitionState(state) == PartitionDeleted { + continue + } + + out[PartitionState(state)] = 0 + } + + for _, partition := range m.Partitions { + out[partition.State]++ + } + + return out +} + +func (m *PartitionRingDesc) activePartitionsCount() int { + count := 0 + for _, partition := range m.Partitions { + if partition.IsActive() { + count++ + } + } + return count +} + +// WithPartitions returns a new PartitionRingDesc with only the specified partitions and their owners included. +func (m *PartitionRingDesc) WithPartitions(partitions map[int32]struct{}) PartitionRingDesc { + newPartitions := make(map[int32]PartitionDesc, len(partitions)) + newOwners := make(map[string]OwnerDesc, len(partitions)*2) // assuming two owners per partition. + + for pid, p := range m.Partitions { + if _, ok := partitions[pid]; ok { + newPartitions[pid] = p + } + } + + for oid, o := range m.Owners { + if _, ok := partitions[o.OwnedPartition]; ok { + newOwners[oid] = o + } + } + + return PartitionRingDesc{ + Partitions: newPartitions, + Owners: newOwners, + } +} + +// AddPartition adds a new partition to the ring. Tokens are auto-generated using the spread minimizing strategy +// which generates deterministic unique tokens. +func (m *PartitionRingDesc) AddPartition(id int32, state PartitionState, now time.Time) { + // The spread-minimizing token generator is a deterministic unique-token generator for a given id and zone. + // Partitions don't use zones. + spreadMinimizing := NewSpreadMinimizingTokenGeneratorForInstanceAndZoneID("", int(id), 0, false) + + m.Partitions[id] = PartitionDesc{ + Id: id, + Tokens: spreadMinimizing.GenerateTokens(optimalTokensPerInstance, nil), + State: state, + StateTimestamp: now.Unix(), + } +} + +// UpdatePartitionState changes the state of a partition. Returns true if the state was changed, +// or false if the update was a no-op. +func (m *PartitionRingDesc) UpdatePartitionState(id int32, state PartitionState, now time.Time) bool { + d, ok := m.Partitions[id] + if !ok { + return false + } + + if d.State == state { + return false + } + + d.State = state + d.StateTimestamp = now.Unix() + m.Partitions[id] = d + return true +} + +// RemovePartition removes a partition. +func (m *PartitionRingDesc) RemovePartition(id int32) { + delete(m.Partitions, id) +} + +// HasPartition returns whether a partition exists. +func (m *PartitionRingDesc) HasPartition(id int32) bool { + _, ok := m.Partitions[id] + return ok +} + +// AddOrUpdateOwner adds or updates a partition owner in the ring. Returns true if the +// owner was added or updated, false if it was left unchanged. +func (m *PartitionRingDesc) AddOrUpdateOwner(id string, state OwnerState, ownedPartition int32, now time.Time) bool { + prev, ok := m.Owners[id] + updated := OwnerDesc{ + State: state, + OwnedPartition: ownedPartition, + + // Preserve the previous timestamp so that we'll NOT compare it. + // Then, if we detect that the OwnerDesc should be updated, we'll + // also update the UpdatedTimestamp. + UpdatedTimestamp: prev.UpdatedTimestamp, + } + + if ok && prev.Equal(updated) { + return false + } + + updated.UpdatedTimestamp = now.Unix() + m.Owners[id] = updated + + return true +} + +// RemoveOwner removes a partition owner.
Returns true if the ring has been changed. +func (m *PartitionRingDesc) RemoveOwner(id string) bool { + if _, ok := m.Owners[id]; !ok { + return false + } + + delete(m.Owners, id) + return true +} + +// HasOwner returns whether an owner exists. +func (m *PartitionRingDesc) HasOwner(id string) bool { + _, ok := m.Owners[id] + return ok +} + +// PartitionOwnersCount returns the number of owners for a given partition. +func (m *PartitionRingDesc) PartitionOwnersCount(partitionID int32) int { + count := 0 + for _, o := range m.Owners { + if o.OwnedPartition == partitionID { + count++ + } + } + return count +} + +// PartitionOwnersCountUpdatedBefore returns the number of owners for a given partition, +// including only owners which have been updated the last time before the input timestamp. +func (m *PartitionRingDesc) PartitionOwnersCountUpdatedBefore(partitionID int32, before time.Time) int { + count := 0 + beforeSeconds := before.Unix() + + for _, o := range m.Owners { + if o.OwnedPartition == partitionID && o.GetUpdatedTimestamp() < beforeSeconds { + count++ + } + } + return count +} + +// Merge implements memberlist.Mergeable. +func (m *PartitionRingDesc) Merge(mergeable memberlist.Mergeable, localCAS bool) (memberlist.Mergeable, error) { + return m.mergeWithTime(mergeable, localCAS, time.Now()) +} + +func (m *PartitionRingDesc) mergeWithTime(mergeable memberlist.Mergeable, localCAS bool, now time.Time) (memberlist.Mergeable, error) { + if mergeable == nil { + return nil, nil + } + + other, ok := mergeable.(*PartitionRingDesc) + if !ok { + return nil, fmt.Errorf("expected *PartitionRingDesc, got %T", mergeable) + } + + if other == nil { + return nil, nil + } + + change := NewPartitionRingDesc() + + // Handle partitions. + for id, otherPart := range other.Partitions { + changed := false + + thisPart, exists := m.Partitions[id] + if !exists { + changed = true + thisPart = otherPart + } else { + // We don't merge changes to partition ID and tokens because we expect them to be immutable. + // + // If in the future we change the token generation algorithm and have to handle migration to + // a different set of tokens, we'll add that support then. For example, we could add a "token generation version" + // to PartitionDesc and then preserve only the tokens generated by the latest version, or also a timestamp + // for token updates. + + // In case the timestamp is equal we give priority to the deleted state. + // The reason is that the timestamp has second precision, so we cover the case where an + // update and a subsequent deletion occur within the same second. + if otherPart.StateTimestamp > thisPart.StateTimestamp || (otherPart.StateTimestamp == thisPart.StateTimestamp && otherPart.State == PartitionDeleted && thisPart.State != PartitionDeleted) { + changed = true + + thisPart.State = otherPart.State + thisPart.StateTimestamp = otherPart.StateTimestamp + } + } + + if changed { + m.Partitions[id] = thisPart + change.Partitions[id] = thisPart + } + } + + if localCAS { + // Let's mark all missing partitions in the incoming change as deleted. + // This breaks commutativity! But we only do it locally, not when gossiping with others. + for pid, thisPart := range m.Partitions { + if _, exists := other.Partitions[pid]; !exists && thisPart.State != PartitionDeleted { + // Partition was removed from the ring. We need to preserve it locally, but we set state to PartitionDeleted.
+ thisPart.State = PartitionDeleted + thisPart.StateTimestamp = now.Unix() + m.Partitions[pid] = thisPart + change.Partitions[pid] = thisPart + } + } + } + + // Now let's handle owners. + for id, otherOwner := range other.Owners { + thisOwner := m.Owners[id] + + // In case the timestamp is equal we give priority to the deleted state. + // The reason is that the timestamp has second precision, so we cover the case where an + // update and a subsequent deletion occur within the same second. + if otherOwner.UpdatedTimestamp > thisOwner.UpdatedTimestamp || (otherOwner.UpdatedTimestamp == thisOwner.UpdatedTimestamp && otherOwner.State == OwnerDeleted && thisOwner.State != OwnerDeleted) { + m.Owners[id] = otherOwner + change.Owners[id] = otherOwner + } + } + + if localCAS { + // Mark all missing owners as deleted. + // This breaks commutativity! But we only do it locally, not when gossiping with others. + for id, thisOwner := range m.Owners { + if _, exists := other.Owners[id]; !exists && thisOwner.State != OwnerDeleted { + // Owner was removed from the ring. We need to preserve it locally, but we set state to OwnerDeleted. + thisOwner.State = OwnerDeleted + thisOwner.UpdatedTimestamp = now.Unix() + m.Owners[id] = thisOwner + change.Owners[id] = thisOwner + } + } + } + + // If nothing changed, report nothing. + if len(change.Partitions) == 0 && len(change.Owners) == 0 { + return nil, nil + } + + return change, nil +} + +// MergeContent implements memberlist.Mergeable. +func (m *PartitionRingDesc) MergeContent() []string { + result := make([]string, 0, len(m.Partitions)+len(m.Owners)) + + // We're assuming that partition IDs and instance IDs are not colliding (i.e. no instance is called "1"). + for pid := range m.Partitions { + result = append(result, strconv.Itoa(int(pid))) + } + + for id := range m.Owners { + result = append(result, id) + } + return result +} + +// RemoveTombstones implements memberlist.Mergeable. +func (m *PartitionRingDesc) RemoveTombstones(limit time.Time) (total, removed int) { + for pid, part := range m.Partitions { + if part.State == PartitionDeleted { + if limit.IsZero() || time.Unix(part.StateTimestamp, 0).Before(limit) { + delete(m.Partitions, pid) + removed++ + } else { + total++ + } + } + } + + for n, owner := range m.Owners { + if owner.State == OwnerDeleted { + if limit.IsZero() || time.Unix(owner.UpdatedTimestamp, 0).Before(limit) { + delete(m.Owners, n) + removed++ + } else { + total++ + } + } + } + + return +} + +// Clone implements memberlist.Mergeable. +func (m *PartitionRingDesc) Clone() memberlist.Mergeable { + clone := proto.Clone(m).(*PartitionRingDesc) + + // Ensure empty maps are preserved (easier to compare with a deep equal in tests).
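+ // (proto.Clone can leave a cloned map nil when the source map is empty but non-nil.)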
+ if m.Partitions != nil && clone.Partitions == nil { + clone.Partitions = map[int32]PartitionDesc{} + } + if m.Owners != nil && clone.Owners == nil { + clone.Owners = map[string]OwnerDesc{} + } + + return clone +} + +func (m *PartitionDesc) IsPending() bool { + return m.GetState() == PartitionPending +} + +func (m *PartitionDesc) IsActive() bool { + return m.GetState() == PartitionActive +} + +func (m *PartitionDesc) IsInactive() bool { + return m.GetState() == PartitionInactive +} + +func (m *PartitionDesc) IsInactiveSince(since time.Time) bool { + return m.IsInactive() && m.GetStateTimestamp() < since.Unix() +} + +func (m *PartitionDesc) GetStateTime() time.Time { + return time.Unix(m.GetStateTimestamp(), 0) +} + +func (m *PartitionDesc) Clone() PartitionDesc { + return *(proto.Clone(m).(*PartitionDesc)) +} + +// CleanName returns the PartitionState name without the "Partition" prefix. +func (s PartitionState) CleanName() string { + return strings.TrimPrefix(s.String(), "Partition") +} diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml b/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml new file mode 100644 index 0000000000000..f4f9afe87d88f --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_status.gohtml @@ -0,0 +1,63 @@
+{{- /*gotype: github.com/grafana/dskit/ring.partitionRingPageData */ -}}
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="UTF-8">
+    <title>Partitions Ring Status</title>
+</head>
+<body>
+<h1>Partitions Ring Status</h1>
+<table width="100%" border="1">
+    <thead>
+    <tr>
+        <th>Partition ID</th>
+        <th>State</th>
+        <th>State updated at</th>
+        <th>Owners</th>
+        <th>Actions</th>
+    </tr>
+    </thead>
+    <tbody>
+    {{ $stateChanges := .PartitionStateChanges }}
+    {{ range $partition := .Partitions }}
+        <tr>
+            <td>{{ .ID }}</td>
+            <td>
+                {{ if .Corrupted }}
+                    Corrupt
+                {{ else }}
+                    {{ .State.CleanName }}
+                {{ end }}
+            </td>
+            <td>
+                {{ if not .StateTimestamp.IsZero }}
+                    {{ .StateTimestamp | formatTimestamp }}
+                {{ else }}
+                    N/A
+                {{ end }}
+            </td>
+            <td>
+                {{ range $ownerID := $partition.OwnerIDs }}
+                    {{$ownerID}}<br/>
+                {{ end }}
+            </td>
+            <td>
+                {{ if and (not .Corrupted) (ne (index $stateChanges .State) 0) }}
+                    {{ $toState := index $stateChanges .State }}
+                    <form method="POST" action="#">
+                        <input type="hidden" name="action" value="change_state"/>
+                        <input type="hidden" name="partition_id" value="{{ .ID }}"/>
+                        <input type="hidden" name="partition_state" value="{{ $toState }}"/>
+                        <button type="submit">Change state to {{ $toState.CleanName }}</button>
+                    </form>
+                {{ end }}
+            </td>
+        </tr>
+    {{ end }}
+    </tbody>
+</table>
+</body>
+</html>
\ No newline at end of file
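For orientation, a sketch of how these new pieces compose (not part of the diff): the watcher defined in the next file satisfies the page handler's PartitionRingReader dependency and the editor satisfies PartitionRingUpdater. The helper name, ring key, and route are invented for the example, and in real use the watcher service would still need to be started through dskit's services package:

package main

import (
	"net/http"

	"github.com/go-kit/log"
	"github.com/grafana/dskit/kv"
	"github.com/grafana/dskit/ring"
	"github.com/prometheus/client_golang/prometheus"
)

// newPartitionRingAdminMux is a hypothetical helper; the caller provides a ready KV client.
func newPartitionRingAdminMux(kvClient kv.Client, logger log.Logger) *http.ServeMux {
	// The watcher keeps an in-memory snapshot of the ring (acts as the PartitionRingReader).
	watcher := ring.NewPartitionRingWatcher("partitions", "partitions-key", kvClient, logger, prometheus.NewRegistry())

	// The editor CAS-updates the same KV key (acts as the PartitionRingUpdater).
	editor := ring.NewPartitionRingEditor("partitions-key", kvClient)

	mux := http.NewServeMux()
	mux.Handle("/partition-ring", ring.NewPartitionRingPageHandler(watcher, editor))
	return mux
}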
diff --git a/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go b/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go new file mode 100644 index 0000000000000..39225697eb0ef --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partition_ring_watcher.go @@ -0,0 +1,100 @@ +package ring + +import ( + "context" + "sync" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/grafana/dskit/kv" + "github.com/grafana/dskit/services" +) + +// PartitionRingWatcher watches the partitions ring for changes in the KV store. +type PartitionRingWatcher struct { + services.Service + + key string + kv kv.Client + logger log.Logger + + ringMx sync.Mutex + ring *PartitionRing + + // Metrics. + numPartitionsGaugeVec *prometheus.GaugeVec +} + +func NewPartitionRingWatcher(name, key string, kv kv.Client, logger log.Logger, reg prometheus.Registerer) *PartitionRingWatcher { + r := &PartitionRingWatcher{ + key: key, + kv: kv, + logger: logger, + ring: NewPartitionRing(*NewPartitionRingDesc()), + numPartitionsGaugeVec: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Name: "partition_ring_partitions", + Help: "Number of partitions by state in the partitions ring.", + ConstLabels: map[string]string{"name": name}, + }, []string{"state"}), + } + + r.Service = services.NewBasicService(r.starting, r.loop, nil).WithName("partitions-ring-watcher") + return r +} + +func (w *PartitionRingWatcher) starting(ctx context.Context) error { + // Get the initial ring state so that, as soon as the service is running, the in-memory + // ring is already populated and there's no race condition between the service running + // and the WatchKey() callback being called for the first time. + value, err := w.kv.Get(ctx, w.key) + if err != nil { + return errors.Wrap(err, "unable to initialise ring state") + } + + if value == nil { + level.Info(w.logger).Log("msg", "partition ring doesn't exist in KV store yet") + value = NewPartitionRingDesc() + } + + w.updatePartitionRing(value.(*PartitionRingDesc)) + return nil +} + +func (w *PartitionRingWatcher) loop(ctx context.Context) error { + w.kv.WatchKey(ctx, w.key, func(value interface{}) bool { + if value == nil { + level.Info(w.logger).Log("msg", "partition ring doesn't exist in KV store yet") + return true + } + + w.updatePartitionRing(value.(*PartitionRingDesc)) + return true + }) + return nil +} + +func (w *PartitionRingWatcher) updatePartitionRing(desc *PartitionRingDesc) { + newRing := NewPartitionRing(*desc) + + w.ringMx.Lock() + w.ring = newRing + w.ringMx.Unlock() + + // Update metrics. + for state, count := range desc.countPartitionsByState() { + w.numPartitionsGaugeVec.WithLabelValues(state.CleanName()).Set(float64(count)) + } +} + +// PartitionRing returns the most up-to-date snapshot of the PartitionRing. The returned instance +// is immutable and will not be updated if further changes are made to the ring.
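+// Callers may therefore hold on to the returned snapshot without synchronisation:
+// updatePartitionRing swaps in a brand new *PartitionRing under the mutex instead of
+// mutating the current one.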
+func (w *PartitionRingWatcher) PartitionRing() *PartitionRing { + w.ringMx.Lock() + defer w.ringMx.Unlock() + + return w.ring +} diff --git a/vendor/github.com/grafana/dskit/ring/partitions_ring_shuffle_shard_cache.go b/vendor/github.com/grafana/dskit/ring/partitions_ring_shuffle_shard_cache.go new file mode 100644 index 0000000000000..ce80d2c14adcf --- /dev/null +++ b/vendor/github.com/grafana/dskit/ring/partitions_ring_shuffle_shard_cache.go @@ -0,0 +1,96 @@ +package ring + +import ( + "math" + "sync" + "time" +) + +type partitionRingShuffleShardCache struct { + mtx sync.RWMutex + cacheWithoutLookback map[subringCacheKey]*PartitionRing + cacheWithLookback map[subringCacheKey]cachedSubringWithLookback[*PartitionRing] +} + +func newPartitionRingShuffleShardCache() *partitionRingShuffleShardCache { + return &partitionRingShuffleShardCache{ + cacheWithoutLookback: map[subringCacheKey]*PartitionRing{}, + cacheWithLookback: map[subringCacheKey]cachedSubringWithLookback[*PartitionRing]{}, + } +} + +func (r *partitionRingShuffleShardCache) setSubring(identifier string, size int, subring *PartitionRing) { + if subring == nil { + return + } + + r.mtx.Lock() + defer r.mtx.Unlock() + + r.cacheWithoutLookback[subringCacheKey{identifier: identifier, shardSize: size}] = subring +} + +func (r *partitionRingShuffleShardCache) getSubring(identifier string, size int) *PartitionRing { + r.mtx.RLock() + defer r.mtx.RUnlock() + + cached := r.cacheWithoutLookback[subringCacheKey{identifier: identifier, shardSize: size}] + if cached == nil { + return nil + } + + return cached +} + +func (r *partitionRingShuffleShardCache) setSubringWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time, subring *PartitionRing) { + if subring == nil { + return + } + + var ( + lookbackWindowStart = now.Add(-lookbackPeriod).Unix() + validForLookbackWindowsStartingBefore = int64(math.MaxInt64) + ) + + for _, partition := range subring.desc.Partitions { + stateChangedDuringLookbackWindow := partition.StateTimestamp >= lookbackWindowStart + + if stateChangedDuringLookbackWindow && partition.StateTimestamp < validForLookbackWindowsStartingBefore { + validForLookbackWindowsStartingBefore = partition.StateTimestamp + } + } + + r.mtx.Lock() + defer r.mtx.Unlock() + + // Only update cache if subring's lookback window starts later than the previously cached subring for this identifier, + // if there is one. This prevents cache thrashing due to different calls competing if their lookback windows start + // before and after the time a partition state has changed. 
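+ // Worked example: with lookbackPeriod=1h and now=12:00, lookbackWindowStart is 11:00.
+ // If the earliest in-window state change happened at 11:30, the entry below stays valid for
+ // lookback windows starting anywhere in [11:00, 11:30]; a window starting after 11:30 would
+ // no longer contain that state change, so it must be recomputed.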
+ key := subringCacheKey{identifier: identifier, shardSize: size, lookbackPeriod: lookbackPeriod} + + if existingEntry, haveCached := r.cacheWithLookback[key]; !haveCached || existingEntry.validForLookbackWindowsStartingAfter < lookbackWindowStart { + r.cacheWithLookback[key] = cachedSubringWithLookback[*PartitionRing]{ + subring: subring, + validForLookbackWindowsStartingAfter: lookbackWindowStart, + validForLookbackWindowsStartingBefore: validForLookbackWindowsStartingBefore, + } + } +} + +func (r *partitionRingShuffleShardCache) getSubringWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) *PartitionRing { + r.mtx.RLock() + defer r.mtx.RUnlock() + + cached, ok := r.cacheWithLookback[subringCacheKey{identifier: identifier, shardSize: size, lookbackPeriod: lookbackPeriod}] + if !ok { + return nil + } + + lookbackWindowStart := now.Add(-lookbackPeriod).Unix() + if lookbackWindowStart < cached.validForLookbackWindowsStartingAfter || lookbackWindowStart > cached.validForLookbackWindowsStartingBefore { + // The cached subring is not valid for the lookback window that has been requested. + return nil + } + + return cached.subring +} diff --git a/vendor/github.com/grafana/dskit/ring/replication_set.go b/vendor/github.com/grafana/dskit/ring/replication_set.go index f05153c0525cb..ffdcf80ab5268 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "sort" + "sync" "time" kitlog "github.com/go-kit/log" @@ -388,6 +389,111 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex return results, nil } +// DoMultiUntilQuorumWithoutSuccessfulContextCancellation behaves similarly to DoUntilQuorumWithoutSuccessfulContextCancellation +// with the following exceptions: +// +// - This function calls DoUntilQuorumWithoutSuccessfulContextCancellation for each input ReplicationSet and requires +// DoUntilQuorumWithoutSuccessfulContextCancellation to successfully run for each of them. Execution breaks on the +// first error returned by DoUntilQuorumWithoutSuccessfulContextCancellation on any ReplicationSet. +// +// - This function requires that the callback function f always call context.CancelCauseFunc once done. Failing to +// cancel the context will leak resources. +func DoMultiUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Context, sets []ReplicationSet, cfg DoUntilQuorumConfig, f func(context.Context, *InstanceDesc, context.CancelCauseFunc) (T, error), cleanupFunc func(T)) ([]T, error) { + if len(sets) == 0 { + return nil, errors.New("no replication sets") + } + if len(sets) == 1 { + return DoUntilQuorumWithoutSuccessfulContextCancellation[T](ctx, sets[0], cfg, f, cleanupFunc) + } + + results, _, err := doMultiUntilQuorumWithoutSuccessfulContextCancellation[T](ctx, sets, cfg, f, cleanupFunc) + return results, err +} + +// See DoMultiUntilQuorumWithoutSuccessfulContextCancellation(). +// +// The returned context.Context is the internal context used by workers; it is returned for testing purposes.
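+//
+// In short: one worker goroutine runs DoUntilQuorumWithoutSuccessfulContextCancellation per
+// replication set, an inflightInstanceTracker records every callback still running, and the
+// shared workers context is cancelled either on the first error or once every callback has
+// completed and no new ones can start.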
+func doMultiUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Context, sets []ReplicationSet, cfg DoUntilQuorumConfig, f func(context.Context, *InstanceDesc, context.CancelCauseFunc) (T, error), cleanupFunc func(T)) ([]T, context.Context, error) { + var ( + returnResultsMx = sync.Mutex{} + returnResults = make([]T, 0, len(sets)*len(sets[0].Instances)) // Assume all replication sets have the same number of instances. + + returnErrOnce sync.Once + returnErr error // The first error that occurred. + + workersGroup = sync.WaitGroup{} + workersCtx, cancelWorkersCtx = context.WithCancelCause(ctx) + + inflightTracker = newInflightInstanceTracker(sets) + ) + + cancelWorkersCtxIfSafe := func() { + if inflightTracker.allInstancesCompleted() { + cancelWorkersCtx(errors.New("all requests completed")) + } + } + + // Start a worker for each set. A worker is responsible for calling DoUntilQuorumWithoutSuccessfulContextCancellation() + // for the given replication set and handling the result. + workersGroup.Add(len(sets)) + + for idx, set := range sets { + go func(idx int, set ReplicationSet) { + defer workersGroup.Done() + + wrappedFn := func(ctx context.Context, instance *InstanceDesc, cancelCtx context.CancelCauseFunc) (T, error) { + // The callback function has been called, so we need to track it. + inflightTracker.addInstance(idx, instance) + + // Inject custom logic in the context.CancelCauseFunc. + return f(ctx, instance, func(cause error) { + // Call the original one. + cancelCtx(cause) + + // The callback is done, so we can remove it from the tracker and then check whether + // it's safe to cancel the workers context. + inflightTracker.removeInstance(idx, instance) + cancelWorkersCtxIfSafe() + }) + } + + setResults, setErr := DoUntilQuorumWithoutSuccessfulContextCancellation[T](workersCtx, set, cfg, wrappedFn, cleanupFunc) + + if setErr != nil { + returnErrOnce.Do(func() { + returnErr = setErr + + // Interrupt the execution of all workers. + cancelWorkersCtx(setErr) + }) + + return + } + + // Keep track of the results. + returnResultsMx.Lock() + returnResults = append(returnResults, setResults...) + returnResultsMx.Unlock() + }(idx, set) + } + + // Wait until all goroutines have terminated. + workersGroup.Wait() + + // All workers completed, so it's guaranteed returnResults and returnErr won't be accessed by workers anymore, + // and it's safe to read them with no locking. + if returnErr != nil { + return nil, workersCtx, returnErr + } + + // No error occurred. It means the workers context hasn't been canceled yet, and we don't expect more callbacks + // to get tracked, so we can check whether the cancellation condition has already been reached and, if so, cancel. + inflightTracker.allInstancesAdded() + cancelWorkersCtxIfSafe() + + return returnResults, workersCtx, nil +} + type instanceResult[T any] struct { result T err error @@ -405,6 +511,16 @@ func (r ReplicationSet) Includes(addr string) bool { return false } +// GetIDs returns the IDs of all instances within the replication set. Returned slice +// order is not guaranteed. +func (r ReplicationSet) GetIDs() []string { + ids := make([]string, 0, len(r.Instances)) + for _, desc := range r.Instances { + ids = append(ids, desc.Id) + } + return ids +} + // GetAddresses returns the addresses of all instances within the replication set. Returned slice // order is not guaranteed.
func (r ReplicationSet) GetAddresses() []string { @@ -468,6 +584,17 @@ func HasReplicationSetChangedWithoutState(before, after ReplicationSet) bool { }) } +// HasReplicationSetChangedWithoutStateOrAddr returns false if two replication sets +// are the same (with possibly different timestamps, instance states, and IP addresses), +// true if they differ in any other way (number of instances, tokens, zones, ...). +func HasReplicationSetChangedWithoutStateOrAddr(before, after ReplicationSet) bool { + return hasReplicationSetChangedExcluding(before, after, func(i *InstanceDesc) { + i.Timestamp = 0 + i.State = PENDING + i.Addr = "" + }) +} + // Do comparison of replicasets, but apply a function first // to be able to exclude (reset) some values func hasReplicationSetChangedExcluding(before, after ReplicationSet, exclude func(*InstanceDesc)) bool { @@ -478,8 +605,8 @@ func hasReplicationSetChangedExcluding(before, after ReplicationSet, exclude fun return true } - sort.Sort(ByAddr(beforeInstances)) - sort.Sort(ByAddr(afterInstances)) + sort.Sort(ByID(beforeInstances)) + sort.Sort(ByID(afterInstances)) for i := 0; i < len(beforeInstances); i++ { b := beforeInstances[i] diff --git a/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go b/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go index 202b568bb9567..73da1bc37f8ac 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set_tracker.go @@ -4,6 +4,7 @@ import ( "context" "errors" "math/rand" + "sync" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -465,3 +466,91 @@ func (t *zoneAwareContextTracker) cancelAllContexts(cause error) { delete(t.cancelFuncs, instance) } } + +type inflightInstanceTracker struct { + mx sync.Mutex + inflight [][]*InstanceDesc + + // expectMoreInstances is true if more instances are expected to be added to the tracker. + expectMoreInstances bool +} + +func newInflightInstanceTracker(sets []ReplicationSet) *inflightInstanceTracker { + // Init the inflight tracker. + inflight := make([][]*InstanceDesc, len(sets)) + for idx, set := range sets { + inflight[idx] = make([]*InstanceDesc, 0, len(set.Instances)) + } + + return &inflightInstanceTracker{ + inflight: inflight, + expectMoreInstances: true, + } +} + +// addInstance adds the instance for replicationSetIdx to the tracker. +// +// addInstance is idempotent. +func (t *inflightInstanceTracker) addInstance(replicationSetIdx int, instance *InstanceDesc) { + t.mx.Lock() + defer t.mx.Unlock() + + // Check if the instance has already been added. + for _, curr := range t.inflight[replicationSetIdx] { + if curr == instance { + return + } + } + + t.inflight[replicationSetIdx] = append(t.inflight[replicationSetIdx], instance) +} + +// removeInstance removes the instance for replicationSetIdx from the tracker. +// +// removeInstance is idempotent. +func (t *inflightInstanceTracker) removeInstance(replicationSetIdx int, instance *InstanceDesc) { + t.mx.Lock() + defer t.mx.Unlock() + + for i, curr := range t.inflight[replicationSetIdx] { + if curr == instance { + instances := t.inflight[replicationSetIdx] + t.inflight[replicationSetIdx] = append(instances[:i], instances[i+1:]...) + + // We can safely break the loop because we don't expect multiple occurrences of the same instance. + return + } + } +} + +// allInstancesAdded signals the tracker that all expected instances have been added. +// +// allInstancesAdded is idempotent.
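+// It is invoked after every per-set worker has returned, at which point no further
+// addInstance() calls can occur and allInstancesCompleted() may start reporting true.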
+func (t *inflightInstanceTracker) allInstancesAdded() { + t.mx.Lock() + defer t.mx.Unlock() + + t.expectMoreInstances = false +} + +// allInstancesCompleted returns true if and only if no more instances are expected to be +// added to the tracker and all previously tracked instances have been removed by calling removeInstance(). +func (t *inflightInstanceTracker) allInstancesCompleted() bool { + t.mx.Lock() + defer t.mx.Unlock() + + // We can't assert all instances have completed if it's still possible + // to add new ones to the tracker. + if t.expectMoreInstances { + return false + } + + // Ensure there are no inflight instances for any replication set. + for _, instances := range t.inflight { + if len(instances) > 0 { + return false + } + } + + return true +} diff --git a/vendor/github.com/grafana/dskit/ring/ring.go b/vendor/github.com/grafana/dskit/ring/ring.go index 0c54bb1c5433e..8c2481edcf7da 100644 --- a/vendor/github.com/grafana/dskit/ring/ring.go +++ b/vendor/github.com/grafana/dskit/ring/ring.go @@ -78,6 +78,12 @@ type ReadRing interface { // GetTokenRangesForInstance returns the token ranges owned by an instance in the ring GetTokenRangesForInstance(instanceID string) (TokenRanges, error) + + // InstancesInZoneCount returns the number of instances in the ring that are registered in a given zone. + InstancesInZoneCount(zone string) int + + // ZonesCount returns the number of zones for which there's at least 1 instance registered in the ring. + ZonesCount() int } var ( @@ -184,10 +190,13 @@ type Ring struct { // to be sorted alphabetically. ringZones []string + // Number of registered instances per zone. + instancesCountPerZone map[string]int + // Cache of shuffle-sharded subrings per identifier. Invalidated when topology changes. // If set to nil, no caching is done (used by tests, and subrings).
shuffledSubringCache map[subringCacheKey]*Ring - shuffledSubringWithLookbackCache map[subringCacheKey]cachedSubringWithLookback + shuffledSubringWithLookbackCache map[subringCacheKey]cachedSubringWithLookback[*Ring] numMembersGaugeVec *prometheus.GaugeVec totalTokensGauge prometheus.Gauge @@ -202,8 +211,8 @@ type subringCacheKey struct { lookbackPeriod time.Duration } -type cachedSubringWithLookback struct { - subring *Ring +type cachedSubringWithLookback[R any] struct { + subring R validForLookbackWindowsStartingAfter int64 // if the lookback window is from T to S, validForLookbackWindowsStartingAfter is the earliest value of T this cache entry is valid for validForLookbackWindowsStartingBefore int64 // if the lookback window is from T to S, validForLookbackWindowsStartingBefore is the latest value of T this cache entry is valid for } @@ -237,7 +246,7 @@ func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client strategy: strategy, ringDesc: &Desc{}, shuffledSubringCache: map[subringCacheKey]*Ring{}, - shuffledSubringWithLookbackCache: map[subringCacheKey]cachedSubringWithLookback{}, + shuffledSubringWithLookbackCache: map[subringCacheKey]cachedSubringWithLookback[*Ring]{}, numMembersGaugeVec: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ Name: "ring_members", Help: "Number of members in the ring", @@ -333,6 +342,7 @@ func (r *Ring) updateRingState(ringDesc *Desc) { ringInstanceByToken := ringDesc.getTokensInfo() ringZones := getZones(ringTokensByZone) oldestRegisteredTimestamp := ringDesc.getOldestRegisteredTimestamp() + instancesCountPerZone := ringDesc.instancesCountPerZone() r.mtx.Lock() defer r.mtx.Unlock() @@ -341,6 +351,7 @@ func (r *Ring) updateRingState(ringDesc *Desc) { r.ringTokensByZone = ringTokensByZone r.ringInstanceByToken = ringInstanceByToken r.ringZones = ringZones + r.instancesCountPerZone = instancesCountPerZone r.oldestRegisteredTimestamp = oldestRegisteredTimestamp r.lastTopologyChange = now @@ -349,7 +360,7 @@ func (r *Ring) updateRingState(ringDesc *Desc) { r.shuffledSubringCache = make(map[subringCacheKey]*Ring) } if r.shuffledSubringWithLookbackCache != nil { - r.shuffledSubringWithLookbackCache = make(map[subringCacheKey]cachedSubringWithLookback) + r.shuffledSubringWithLookbackCache = make(map[subringCacheKey]cachedSubringWithLookback[*Ring]) } r.updateRingMetrics(rc) @@ -676,7 +687,7 @@ func (r *Ring) ShuffleShard(identifier string, size int) ReadRing { // operations (read only). // // This function supports caching, but the cache will only be effective if successive calls for the -// same identifier are for increasing values of (now-lookbackPeriod). +// same identifier are made with the same lookbackPeriod and increasing values of now. func (r *Ring) ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) ReadRing { // Nothing to do if the shard size is not smaller than the actual ring.
if size <= 0 || r.InstancesCount() <= size { @@ -797,12 +808,13 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur shardTokens := mergeTokenGroups(shardTokensByZone) return &Ring{ - cfg: r.cfg, - strategy: r.strategy, - ringDesc: shardDesc, - ringTokens: shardTokens, - ringTokensByZone: shardTokensByZone, - ringZones: getZones(shardTokensByZone), + cfg: r.cfg, + strategy: r.strategy, + ringDesc: shardDesc, + ringTokens: shardTokens, + ringTokensByZone: shardTokensByZone, + ringZones: getZones(shardTokensByZone), + instancesCountPerZone: shardDesc.instancesCountPerZone(), oldestRegisteredTimestamp: shardDesc.getOldestRegisteredTimestamp(), @@ -866,16 +878,32 @@ func mergeTokenGroups(groupsByName map[string][]uint32) []uint32 { return merged } -// GetInstanceState returns the current state of an instance or an error if the -// instance does not exist in the ring. -func (r *Ring) GetInstanceState(instanceID string) (InstanceState, error) { +// GetInstance returns the InstanceDesc for the given instanceID or an error +// if the instance doesn't exist in the ring. The returned InstanceDesc is NOT a +// deep copy, so the caller should never modify it. +func (r *Ring) GetInstance(instanceID string) (doNotModify InstanceDesc, _ error) { r.mtx.RLock() defer r.mtx.RUnlock() instances := r.ringDesc.GetIngesters() + if instances == nil { + return InstanceDesc{}, ErrInstanceNotFound + } + instance, ok := instances[instanceID] if !ok { - return PENDING, ErrInstanceNotFound + return InstanceDesc{}, ErrInstanceNotFound + } + + return instance, nil +} + +// GetInstanceState returns the current state of an instance or an error if the +// instance does not exist in the ring. +func (r *Ring) GetInstanceState(instanceID string) (InstanceState, error) { + instance, err := r.GetInstance(instanceID) + if err != nil { + return PENDING, err } return instance.GetState(), nil @@ -1017,7 +1045,7 @@ func (r *Ring) setCachedShuffledSubringWithLookback(identifier string, size int, key := subringCacheKey{identifier: identifier, shardSize: size, lookbackPeriod: lookbackPeriod} if existingEntry, haveCached := r.shuffledSubringWithLookbackCache[key]; !haveCached || existingEntry.validForLookbackWindowsStartingAfter < lookbackWindowStart { - r.shuffledSubringWithLookbackCache[key] = cachedSubringWithLookback{ + r.shuffledSubringWithLookbackCache[key] = cachedSubringWithLookback[*Ring]{ subring: subring, validForLookbackWindowsStartingAfter: lookbackWindowStart, validForLookbackWindowsStartingBefore: validForLookbackWindowsStartingBefore, @@ -1063,6 +1091,21 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { newRingPageHandler(r, r.cfg.HeartbeatTimeout).handle(w, req) } +// InstancesInZoneCount returns the number of instances in the ring that are registered in a given zone. +func (r *Ring) InstancesInZoneCount(zone string) int { + r.mtx.RLock() + defer r.mtx.RUnlock() + + return r.instancesCountPerZone[zone] +} + +func (r *Ring) ZonesCount() int { + r.mtx.RLock() + defer r.mtx.RUnlock() + + return len(r.ringZones) +} + // Operation describes which instances can be included in the replica set, based on their state. // // Implemented as a bitmap, with the upper 16 bits used for encoding extendReplicaSet, and the lower 16 bits used for encoding healthy states.
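A small illustrative sketch of the bitmap layout that comment describes (not from the diff; the type and helper names are invented), with healthy states in the low 16 bits and replica-set extension in the high 16 bits:

package main

import "fmt"

// op mirrors the Operation bitmap described above: bit s (low 16 bits) marks state s
// as healthy; bit 16+s (high 16 bits) marks state s as extending the replica set.
type op uint32

func (o op) withHealthy(s uint) op { return o | op(1)<<s }
func (o op) withExtend(s uint) op  { return o | op(1)<<(16+s) }

func (o op) isHealthy(s uint) bool    { return o&(op(1)<<s) != 0 }
func (o op) shouldExtend(s uint) bool { return o&(op(1)<<(16+s)) != 0 }

func main() {
	const active, pending = 0, 2 // hypothetical state numbers for the example
	read := op(0).withHealthy(active).withExtend(pending)
	fmt.Println(read.isHealthy(active), read.shouldExtend(pending)) // true true
}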
diff --git a/vendor/github.com/grafana/dskit/ring/http.go b/vendor/github.com/grafana/dskit/ring/ring_http.go similarity index 96% rename from vendor/github.com/grafana/dskit/ring/http.go rename to vendor/github.com/grafana/dskit/ring/ring_http.go index e70b3e6f0a1f9..7300430ddac18 100644 --- a/vendor/github.com/grafana/dskit/ring/http.go +++ b/vendor/github.com/grafana/dskit/ring/ring_http.go @@ -13,7 +13,7 @@ import ( "time" ) -//go:embed status.gohtml +//go:embed ring_status.gohtml var defaultPageContent string var defaultPageTemplate = template.Must(template.New("webpage").Funcs(template.FuncMap{ "mod": func(i, j int) bool { return i%j == 0 }, @@ -134,7 +134,7 @@ func (h *ringPageHandler) handle(w http.ResponseWriter, req *http.Request) { // RenderHTTPResponse either responds with json or a rendered html page using the passed in template // by checking the Accepts header -func renderHTTPResponse(w http.ResponseWriter, v httpResponse, t *template.Template, r *http.Request) { +func renderHTTPResponse(w http.ResponseWriter, v any, t *template.Template, r *http.Request) { accept := r.Header.Get("Accept") if strings.Contains(accept, "application/json") { writeJSONResponse(w, v) @@ -161,7 +161,7 @@ func (h *ringPageHandler) forget(ctx context.Context, id string) error { } // WriteJSONResponse writes some JSON as a HTTP response. -func writeJSONResponse(w http.ResponseWriter, v httpResponse) { +func writeJSONResponse(w http.ResponseWriter, v any) { w.Header().Set("Content-Type", "application/json") if err := json.NewEncoder(w).Encode(v); err != nil { diff --git a/vendor/github.com/grafana/dskit/ring/status.gohtml b/vendor/github.com/grafana/dskit/ring/ring_status.gohtml similarity index 100% rename from vendor/github.com/grafana/dskit/ring/status.gohtml rename to vendor/github.com/grafana/dskit/ring/ring_status.gohtml diff --git a/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go b/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go index 2363825076fcd..bd2ed9970a594 100644 --- a/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go +++ b/vendor/github.com/grafana/dskit/ring/spread_minimizing_token_generator.go @@ -8,10 +8,6 @@ import ( "sort" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "golang.org/x/exp/slices" ) @@ -22,11 +18,10 @@ const ( ) var ( - instanceIDRegex = regexp.MustCompile(`^(.*)-(\d+)$`) + instanceIDRegex = regexp.MustCompile(`^(.*-)(\d+)$`) errorBadInstanceIDFormat = func(instanceID string) error { return fmt.Errorf("unable to extract instance id from %q", instanceID) } - errorNoPreviousInstance = fmt.Errorf("impossible to find the instance preceding the target instance, because it is the first instance") errorMissingPreviousInstance = func(requiredInstanceID string) error { return fmt.Errorf("the instance %q has not been registered to the ring or has no tokens yet", requiredInstanceID) @@ -49,15 +44,13 @@ var ( ) type SpreadMinimizingTokenGenerator struct { - instanceID int - instance string - zoneID int - spreadMinimizingZones []string - canJoinEnabled bool - logger log.Logger + instanceID int + instancePrefix string + zoneID int + canJoinEnabled bool } -func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZones []string, canJoinEnabled bool, logger log.Logger) (*SpreadMinimizingTokenGenerator, error) { +func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZones []string, canJoinEnabled bool) 
(*SpreadMinimizingTokenGenerator, error) { if len(spreadMinimizingZones) <= 0 || len(spreadMinimizingZones) > maxZonesCount { return nil, errorZoneCountOutOfBound(len(spreadMinimizingZones)) } @@ -66,52 +59,35 @@ func NewSpreadMinimizingTokenGenerator(instance, zone string, spreadMinimizingZo if !slices.IsSorted(sortedZones) { sort.Strings(sortedZones) } - instanceID, err := parseInstanceID(instance) + zoneID, err := findZoneID(zone, sortedZones) if err != nil { return nil, err } - zoneID, err := findZoneID(zone, sortedZones) + + prefix, instanceID, err := parseInstanceID(instance) if err != nil { return nil, err } - tokenGenerator := &SpreadMinimizingTokenGenerator{ - instanceID: instanceID, - instance: instance, - zoneID: zoneID, - spreadMinimizingZones: sortedZones, - canJoinEnabled: canJoinEnabled, - logger: logger, - } - return tokenGenerator, nil + return NewSpreadMinimizingTokenGeneratorForInstanceAndZoneID(prefix, instanceID, zoneID, canJoinEnabled), nil } -func parseInstanceID(instanceID string) (int, error) { - parts := instanceIDRegex.FindStringSubmatch(instanceID) - if len(parts) != 3 { - return 0, errorBadInstanceIDFormat(instanceID) +func NewSpreadMinimizingTokenGeneratorForInstanceAndZoneID(instancePrefix string, instanceID, zoneID int, canJoinEnabled bool) *SpreadMinimizingTokenGenerator { + return &SpreadMinimizingTokenGenerator{ + instanceID: instanceID, + instancePrefix: instancePrefix, + zoneID: zoneID, + canJoinEnabled: canJoinEnabled, } - return strconv.Atoi(parts[2]) } -// previousInstance determines the string id of the instance preceding the given instance string id. -// If it is impossible to parse the given instanceID, or it is impossible to determine its predecessor -// because the passed instanceID has a bad format, or has no predecessor, an error is returned. -// For examples, my-instance-1 is preceded by instance my-instance-0, but my-instance-0 has no -// predecessor because its index is 0. -func previousInstance(instanceID string) (string, error) { +func parseInstanceID(instanceID string) (string, int, error) { parts := instanceIDRegex.FindStringSubmatch(instanceID) if len(parts) != 3 { - return "", errorBadInstanceIDFormat(instanceID) - } - id, err := strconv.Atoi(parts[2]) - if err != nil { - return "", err - } - if id == 0 { - return "", errorNoPreviousInstance + return "", 0, errorBadInstanceIDFormat(instanceID) } - return fmt.Sprintf("%s-%d", parts[1], id-1), nil + val, err := strconv.Atoi(parts[2]) + return parts[1], val, err } // findZoneID gets a zone name and a slice of sorted zones, @@ -193,7 +169,11 @@ func (t *SpreadMinimizingTokenGenerator) GenerateTokens(requestedTokensCount int used[v] = true } - allTokens := t.generateAllTokens() + allTokens, err := t.generateAllTokens() + if err != nil { + // we were unable to generate required tokens, so we panic. + panic(err) + } uniqueTokens := make(Tokens, 0, requestedTokensCount) // allTokens is a sorted slice of tokens for instance t.cfg.InstanceID in zone t.cfg.zone @@ -214,11 +194,14 @@ func (t *SpreadMinimizingTokenGenerator) GenerateTokens(requestedTokensCount int // placed in the ring that already contains instances with all the ids lower than t.instanceID // is optimal. // Calls to this method will always return the same set of tokens.
-func (t *SpreadMinimizingTokenGenerator) generateAllTokens() Tokens { - tokensByInstanceID := t.generateTokensByInstanceID() +func (t *SpreadMinimizingTokenGenerator) generateAllTokens() (Tokens, error) { + tokensByInstanceID, err := t.generateTokensByInstanceID() + if err != nil { + return nil, err + } allTokens := tokensByInstanceID[t.instanceID] slices.Sort(allTokens) - return allTokens + return allTokens, nil } // generateTokensByInstanceID generates the optimal number of tokens (optimalTokenPerInstance), @@ -226,13 +209,13 @@ func (t *SpreadMinimizingTokenGenerator) generateAllTokens() Tokens { // (with id t.instanceID). Generated tokens are not sorted, but they are distributed in such a // way that registered ownership of all the instances is optimal. // Calls to this method will always return the same set of tokens. -func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]Tokens { +func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() (map[int]Tokens, error) { firstInstanceTokens := t.generateFirstInstanceTokens() tokensByInstanceID := make(map[int]Tokens, t.instanceID+1) tokensByInstanceID[0] = firstInstanceTokens if t.instanceID == 0 { - return tokensByInstanceID + return tokensByInstanceID, nil } // tokensQueues is a slice of priority queues. Slice indexes correspond @@ -272,10 +255,8 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To optimalTokenOwnership := t.optimalTokenOwnership(optimalInstanceOwnership, currInstanceOwnership, uint32(optimalTokensPerInstance-addedTokens)) highestOwnershipInstance := instanceQueue.Peek() if highestOwnershipInstance == nil || highestOwnershipInstance.ownership <= float64(optimalTokenOwnership) { - level.Warn(t.logger).Log("msg", "it was impossible to add a token because the instance with the highest ownership cannot satisfy the request", "added tokens", addedTokens+1, "highest ownership", highestOwnershipInstance.ownership, "requested ownership", optimalTokenOwnership) - // if this happens, it means that we cannot accommodate other tokens, so we panic - err := fmt.Errorf("it was impossible to add %dth token for instance with id %d in zone %s because the instance with the highest ownership cannot satisfy the requested ownership %d", addedTokens+1, i, t.spreadMinimizingZones[t.zoneID], optimalTokenOwnership) - panic(err) + // if this happens, it means that we cannot accommodate other tokens + return nil, fmt.Errorf("it was impossible to add %dth token for instance with id %d in zone id %d because the instance with the highest ownership cannot satisfy the requested ownership %d", addedTokens+1, i, t.zoneID, optimalTokenOwnership) } tokensQueue := tokensQueues[highestOwnershipInstance.item.instanceID] highestOwnershipToken := tokensQueue.Peek() @@ -288,10 +269,8 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To token := highestOwnershipToken.item newToken, err := t.calculateNewToken(token, optimalTokenOwnership) if err != nil { - level.Error(t.logger).Log("msg", "it was impossible to calculate a new token because an error occurred", "err", err) - // if this happens, it means that we cannot accommodate additional tokens, so we panic - err := fmt.Errorf("it was impossible to calculate the %dth token for instance with id %d in zone %s", addedTokens+1, i, t.spreadMinimizingZones[t.zoneID]) - panic(err) + // if this happens, it means that we cannot accommodate additional tokens + return nil, fmt.Errorf("it was impossible to calculate the %dth token 
for instance with id %d in zone id %d", addedTokens+1, i, t.zoneID) } tokens = append(tokens, newToken) // add the new token to currInstanceTokenQueue @@ -317,7 +296,7 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To tokensByInstanceID[i] = tokens // if this is the last iteration we return, so we avoid to call additional heap.Pushs if i == t.instanceID { - return tokensByInstanceID + return tokensByInstanceID, nil } // If there were some ignored instances, we put them back on the queue. @@ -331,7 +310,7 @@ func (t *SpreadMinimizingTokenGenerator) generateTokensByInstanceID() map[int]To heap.Push(&instanceQueue, newRingInstanceOwnershipInfo(i, currInstanceOwnership)) } - return tokensByInstanceID + return tokensByInstanceID, nil } func (t *SpreadMinimizingTokenGenerator) CanJoin(instances map[string]InstanceDesc) error { @@ -339,13 +318,10 @@ func (t *SpreadMinimizingTokenGenerator) CanJoin(instances map[string]InstanceDe return nil } - prevInstance, err := previousInstance(t.instance) - if err != nil { - if errors.Is(err, errorNoPreviousInstance) { - return nil - } - return err + if t.instanceID == 0 { + return nil } + prevInstance := fmt.Sprintf("%s%d", t.instancePrefix, t.instanceID-1) instanceDesc, ok := instances[prevInstance] if ok && len(instanceDesc.Tokens) != 0 { return nil diff --git a/vendor/github.com/grafana/dskit/ring/tokens.go b/vendor/github.com/grafana/dskit/ring/tokens.go index cf4999ff5d21f..7f0780639421b 100644 --- a/vendor/github.com/grafana/dskit/ring/tokens.go +++ b/vendor/github.com/grafana/dskit/ring/tokens.go @@ -7,6 +7,8 @@ import ( "sort" ) +type Token uint32 + // Tokens is a simple list of tokens. type Tokens []uint32 diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go index 6c2133a9bc242..effe2e54eaf88 100644 --- a/vendor/github.com/grafana/dskit/server/server.go +++ b/vendor/github.com/grafana/dskit/server/server.go @@ -28,7 +28,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/config" "github.com/prometheus/exporter-toolkit/web" - "github.com/soheilhy/cmux" "golang.org/x/net/netutil" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -114,7 +113,6 @@ type Config struct { HTTPMiddleware []middleware.Interface `yaml:"-"` Router *mux.Router `yaml:"-"` DoNotAddDefaultHTTPMiddleware bool `yaml:"-"` - RouteHTTPToGRPC bool `yaml:"-"` GRPCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` @@ -132,6 +130,7 @@ type Config struct { LogLevel log.Level `yaml:"log_level"` Log gokit_log.Logger `yaml:"-"` LogSourceIPs bool `yaml:"log_source_ips_enabled"` + LogSourceIPsFull bool `yaml:"log_source_ips_full"` LogSourceIPsHeader string `yaml:"log_source_ips_header"` LogSourceIPsRegex string `yaml:"log_source_ips_regex"` LogRequestHeaders bool `yaml:"log_request_headers"` @@ -196,6 +195,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.LogFormat, "log.format", log.LogfmtFormat, "Output log messages in the given format. Valid formats: [logfmt, json]") cfg.LogLevel.RegisterFlags(f) f.BoolVar(&cfg.LogSourceIPs, "server.log-source-ips-enabled", false, "Optionally log the source IPs.") + f.BoolVar(&cfg.LogSourceIPsFull, "server.log-source-ips-full", false, "Log all source IPs instead of only the originating one. 
Only used if server.log-source-ips-enabled is true") f.StringVar(&cfg.LogSourceIPsHeader, "server.log-source-ips-header", "", "Header field storing the source IPs. Only used if server.log-source-ips-enabled is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For headers are used") f.StringVar(&cfg.LogSourceIPsRegex, "server.log-source-ips-regex", "", "Regex for matching the source IPs. Only used if server.log-source-ips-enabled is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For headers are used") f.BoolVar(&cfg.LogRequestHeaders, "server.log-request-headers", false, "Optionally log request headers.") @@ -220,13 +220,6 @@ type Server struct { grpcListener net.Listener httpListener net.Listener - // These fields are used to support grpc over the http server - // if RouteHTTPToGRPC is set. the fields are kept here - // so they can be initialized in New() and started in Run() - grpchttpmux cmux.CMux - grpcOnHTTPListener net.Listener - GRPCOnHTTPServer *grpc.Server - HTTP *mux.Router HTTPServer *http.Server GRPC *grpc.Server @@ -278,15 +271,6 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { httpListener = netutil.LimitListener(httpListener, cfg.HTTPConnLimit) } - var grpcOnHTTPListener net.Listener - var grpchttpmux cmux.CMux - if cfg.RouteHTTPToGRPC { - grpchttpmux = cmux.New(httpListener) - - httpListener = grpchttpmux.Match(cmux.HTTP1Fast("PATCH")) - grpcOnHTTPListener = grpchttpmux.Match(cmux.HTTP2()) - } - network = cfg.GRPCListenNetwork if network == "" { network = DefaultNetwork @@ -437,41 +421,10 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { grpcOptions = append(grpcOptions, grpc.Creds(grpcCreds)) } grpcServer := grpc.NewServer(grpcOptions...) - grpcOnHTTPServer := grpc.NewServer(grpcOptions...) - sourceIPs, err := middleware.NewSourceIPs(cfg.LogSourceIPsHeader, cfg.LogSourceIPsRegex) + httpMiddleware, err := BuildHTTPMiddleware(cfg, router, metrics, logger) if err != nil { - return nil, fmt.Errorf("error setting up source IP extraction: %v", err) - } - logSourceIPs := sourceIPs - if !cfg.LogSourceIPs { - // We always include the source IPs for traces, - // but only want to log them in the middleware if that is enabled. - logSourceIPs = nil - } - - defaultLogMiddleware := middleware.NewLogMiddleware(logger, cfg.LogRequestHeaders, cfg.LogRequestAtInfoLevel, logSourceIPs, strings.Split(cfg.LogRequestExcludeHeadersList, ",")) - defaultLogMiddleware.DisableRequestSuccessLog = cfg.DisableRequestSuccessLog - - defaultHTTPMiddleware := []middleware.Interface{ - middleware.Tracer{ - RouteMatcher: router, - SourceIPs: sourceIPs, - }, - defaultLogMiddleware, - middleware.Instrument{ - RouteMatcher: router, - Duration: metrics.RequestDuration, - RequestBodySize: metrics.ReceivedMessageSize, - ResponseBodySize: metrics.SentMessageSize, - InflightRequests: metrics.InflightRequests, - }, - } - var httpMiddleware []middleware.Interface - if cfg.DoNotAddDefaultHTTPMiddleware { - httpMiddleware = cfg.HTTPMiddleware - } else { - httpMiddleware = append(defaultHTTPMiddleware, cfg.HTTPMiddleware...) 
+ return nil, fmt.Errorf("error building http middleware: %w", err) } httpServer := &http.Server{ @@ -491,20 +444,17 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { } return &Server{ - cfg: cfg, - httpListener: httpListener, - grpcListener: grpcListener, - grpcOnHTTPListener: grpcOnHTTPListener, - handler: handler, - grpchttpmux: grpchttpmux, - - HTTP: router, - HTTPServer: httpServer, - GRPC: grpcServer, - GRPCOnHTTPServer: grpcOnHTTPServer, - Log: logger, - Registerer: cfg.registererOrDefault(), - Gatherer: gatherer, + cfg: cfg, + httpListener: httpListener, + grpcListener: grpcListener, + handler: handler, + + HTTP: router, + HTTPServer: httpServer, + GRPC: grpcServer, + Log: logger, + Registerer: cfg.registererOrDefault(), + Gatherer: gatherer, }, nil } @@ -521,6 +471,45 @@ func RegisterInstrumentationWithGatherer(router *mux.Router, gatherer prometheus router.PathPrefix("/debug/pprof").Handler(http.DefaultServeMux) } +func BuildHTTPMiddleware(cfg Config, router *mux.Router, metrics *Metrics, logger gokit_log.Logger) ([]middleware.Interface, error) { + sourceIPs, err := middleware.NewSourceIPs(cfg.LogSourceIPsHeader, cfg.LogSourceIPsRegex, cfg.LogSourceIPsFull) + if err != nil { + return nil, fmt.Errorf("error setting up source IP extraction: %w", err) + } + logSourceIPs := sourceIPs + if !cfg.LogSourceIPs { + // We always include the source IPs for traces, + // but only want to log them in the middleware if that is enabled. + logSourceIPs = nil + } + + defaultLogMiddleware := middleware.NewLogMiddleware(logger, cfg.LogRequestHeaders, cfg.LogRequestAtInfoLevel, logSourceIPs, strings.Split(cfg.LogRequestExcludeHeadersList, ",")) + defaultLogMiddleware.DisableRequestSuccessLog = cfg.DisableRequestSuccessLog + + defaultHTTPMiddleware := []middleware.Interface{ + middleware.Tracer{ + RouteMatcher: router, + SourceIPs: sourceIPs, + }, + defaultLogMiddleware, + middleware.Instrument{ + RouteMatcher: router, + Duration: metrics.RequestDuration, + RequestBodySize: metrics.ReceivedMessageSize, + ResponseBodySize: metrics.SentMessageSize, + InflightRequests: metrics.InflightRequests, + }, + } + var httpMiddleware []middleware.Interface + if cfg.DoNotAddDefaultHTTPMiddleware { + httpMiddleware = cfg.HTTPMiddleware + } else { + httpMiddleware = append(defaultHTTPMiddleware, cfg.HTTPMiddleware...) + } + + return httpMiddleware, nil +} + // Run the server; blocks until SIGTERM (if signal handling is enabled), an error is received, or Stop() is called. 
func (s *Server) Run() error { errChan := make(chan error, 1) @@ -563,18 +552,6 @@ func (s *Server) Run() error { handleGRPCError(err, errChan) }() - // grpchttpmux will only be set if grpchttpmux RouteHTTPToGRPC is set - if s.grpchttpmux != nil { - go func() { - err := s.grpchttpmux.Serve() - handleGRPCError(err, errChan) - }() - go func() { - err := s.GRPCOnHTTPServer.Serve(s.grpcOnHTTPListener) - handleGRPCError(err, errChan) - }() - } - return <-errChan } diff --git a/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go b/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go index 08653eda38abd..70c86d16d85dd 100644 --- a/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go +++ b/vendor/github.com/grafana/dskit/spanlogger/spanlogger.go @@ -158,7 +158,7 @@ func (s *SpanLogger) getLogger() log.Logger { traceID, ok := tracing.ExtractSampledTraceID(s.ctx) if ok { - logger = log.With(logger, "traceID", traceID) + logger = log.With(logger, "trace_id", traceID) } // If the value has been set by another goroutine, fetch that other value and discard the one we made. if !s.logger.CompareAndSwap(nil, &logger) { @@ -167,3 +167,17 @@ func (s *SpanLogger) getLogger() log.Logger { } return logger } + +// SetSpanAndLogTag sets a tag on the span used by this SpanLogger, and appends a key/value pair to the logger used for +// future log lines emitted by this SpanLogger. +// +// It is not safe to call this method from multiple goroutines simultaneously. +// It is safe to call this method at the same time as calling other SpanLogger methods, however, this may produce +// inconsistent results (eg. some log lines may be emitted with the provided key/value pair, and others may not). +func (s *SpanLogger) SetSpanAndLogTag(key string, value interface{}) { + s.Span.SetTag(key, value) + + logger := s.getLogger() + wrappedLogger := log.With(logger, key, value) + s.logger.Store(&wrappedLogger) +} diff --git a/vendor/github.com/grafana/dskit/user/grpc.go b/vendor/github.com/grafana/dskit/user/grpc.go index 201b835eeab7d..fcfd3d7a91cdc 100644 --- a/vendor/github.com/grafana/dskit/user/grpc.go +++ b/vendor/github.com/grafana/dskit/user/grpc.go @@ -13,13 +13,8 @@ import ( // ExtractFromGRPCRequest extracts the user ID from the request metadata and returns // the user ID and a context with the user ID injected. func ExtractFromGRPCRequest(ctx context.Context) (string, context.Context, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "", ctx, ErrNoOrgID - } - - orgIDs, ok := md[lowerOrgIDHeaderName] - if !ok || len(orgIDs) != 1 { + orgIDs := metadata.ValueFromIncomingContext(ctx, lowerOrgIDHeaderName) + if len(orgIDs) != 1 { return "", ctx, ErrNoOrgID } diff --git a/vendor/github.com/grafana/gomemcache/memcache/memcache.go b/vendor/github.com/grafana/gomemcache/memcache/memcache.go index c5962d092e0f6..c627cbdf9834c 100644 --- a/vendor/github.com/grafana/gomemcache/memcache/memcache.go +++ b/vendor/github.com/grafana/gomemcache/memcache/memcache.go @@ -619,7 +619,7 @@ func (c *Client) GetMulti(keys []string, opts ...Option) (map[string]*Item, erro options := newOptions(opts...) 
var lk sync.Mutex - m := make(map[string]*Item) + m := make(map[string]*Item, len(keys)) addItemToMap := func(it *Item) { lk.Lock() defer lk.Unlock() diff --git a/vendor/github.com/soheilhy/cmux/.gitignore b/vendor/github.com/soheilhy/cmux/.gitignore deleted file mode 100644 index daf913b1b347a..0000000000000 --- a/vendor/github.com/soheilhy/cmux/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/soheilhy/cmux/.travis.yml b/vendor/github.com/soheilhy/cmux/.travis.yml deleted file mode 100644 index 4d78a519feb62..0000000000000 --- a/vendor/github.com/soheilhy/cmux/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 - - tip - -matrix: - allow_failures: - - go: tip - -gobuild_args: -race - -before_install: - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u github.com/kisielk/errcheck; fi - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u golang.org/x/lint/golint; fi - -before_script: - - '! gofmt -s -l . | read' - - echo $TRAVIS_GO_VERSION - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then golint ./...; fi - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then errcheck ./...; fi - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet .; fi - - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet --shadow .; fi - -script: - - go test -bench . -v ./... - - go test -race -bench . -v ./... diff --git a/vendor/github.com/soheilhy/cmux/CONTRIBUTORS b/vendor/github.com/soheilhy/cmux/CONTRIBUTORS deleted file mode 100644 index 49878f228a122..0000000000000 --- a/vendor/github.com/soheilhy/cmux/CONTRIBUTORS +++ /dev/null @@ -1,12 +0,0 @@ -# The list of people who have contributed code to the cmux repository. -# -# Auto-generated with: -# git log --oneline --pretty=format:'%an <%aE>' | sort -u -# -Andreas Jaekle -Dmitri Shuralyov -Ethan Mosbaugh -Soheil Hassas Yeganeh -Soheil Hassas Yeganeh -Tamir Duberstein -Tamir Duberstein diff --git a/vendor/github.com/soheilhy/cmux/LICENSE b/vendor/github.com/soheilhy/cmux/LICENSE deleted file mode 100644 index d645695673349..0000000000000 --- a/vendor/github.com/soheilhy/cmux/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/soheilhy/cmux/README.md b/vendor/github.com/soheilhy/cmux/README.md deleted file mode 100644 index c4191b70b0035..0000000000000 --- a/vendor/github.com/soheilhy/cmux/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# cmux: Connection Mux ![Travis Build Status](https://api.travis-ci.org/soheilhy/args.svg?branch=master "Travis Build Status") [![GoDoc](https://godoc.org/github.com/soheilhy/cmux?status.svg)](http://godoc.org/github.com/soheilhy/cmux) - -cmux is a generic Go library to multiplex connections based on -their payload. Using cmux, you can serve gRPC, SSH, HTTPS, HTTP, -Go RPC, and pretty much any other protocol on the same TCP listener. - -## How-To -Simply create your main listener, create a cmux for that listener, -and then match connections: -```go -// Create the main listener. -l, err := net.Listen("tcp", ":23456") -if err != nil { - log.Fatal(err) -} - -// Create a cmux. -m := cmux.New(l) - -// Match connections in order: -// First grpc, then HTTP, and otherwise Go RPC/TCP. -grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc")) -httpL := m.Match(cmux.HTTP1Fast()) -trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched. - -// Create your protocol servers. -grpcS := grpc.NewServer() -grpchello.RegisterGreeterServer(grpcS, &server{}) - -httpS := &http.Server{ - Handler: &helloHTTP1Handler{}, -} - -trpcS := rpc.NewServer() -trpcS.Register(&ExampleRPCRcvr{}) - -// Use the muxed listeners for your servers. -go grpcS.Serve(grpcL) -go httpS.Serve(httpL) -go trpcS.Accept(trpcL) - -// Start serving! -m.Serve() -``` - -Take a look at [other examples in the GoDoc](http://godoc.org/github.com/soheilhy/cmux/#pkg-examples). - -## Docs -* [GoDocs](https://godoc.org/github.com/soheilhy/cmux) - -## Performance -There is room for improvment but, since we are only matching -the very first bytes of a connection, the performance overheads on -long-lived connections (i.e., RPCs and pipelined HTTP streams) -is negligible. - -*TODO(soheil)*: Add benchmarks. - -## Limitations -* *TLS*: `net/http` uses a type assertion to identify TLS connections; since -cmux's lookahead-implementing connection wraps the underlying TLS connection, -this type assertion fails. -Because of that, you can serve HTTPS using cmux but `http.Request.TLS` -would not be set in your handlers. - -* *Different Protocols on The Same Connection*: `cmux` matches the connection -when it's accepted. For example, one connection can be either gRPC or REST, but -not both. That is, we assume that a client connection is either used for gRPC -or REST. - -* *Java gRPC Clients*: Java gRPC client blocks until it receives a SETTINGS -frame from the server. If you are using the Java client to connect to a cmux'ed -gRPC server please match with writers: -```go -grpcl := m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc")) -``` - -# Copyright and License -Copyright 2016 The CMux Authors. All rights reserved. - -See [CONTRIBUTORS](https://github.com/soheilhy/cmux/blob/master/CONTRIBUTORS) -for the CMux Authors. Code is released under -[the Apache 2 license](https://github.com/soheilhy/cmux/blob/master/LICENSE). diff --git a/vendor/github.com/soheilhy/cmux/buffer.go b/vendor/github.com/soheilhy/cmux/buffer.go deleted file mode 100644 index f8cf30a1e66af..0000000000000 --- a/vendor/github.com/soheilhy/cmux/buffer.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cmux - -import ( - "bytes" - "io" -) - -// bufferedReader is an optimized implementation of io.Reader that behaves like -// ``` -// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer)) -// ``` -// without allocating. -type bufferedReader struct { - source io.Reader - buffer bytes.Buffer - bufferRead int - bufferSize int - sniffing bool - lastErr error -} - -func (s *bufferedReader) Read(p []byte) (int, error) { - if s.bufferSize > s.bufferRead { - // If we have already read something from the buffer before, we return the - // same data and the last error if any. We need to immediately return, - // otherwise we may block for ever, if we try to be smart and call - // source.Read() seeking a little bit of more data. - bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize]) - s.bufferRead += bn - return bn, s.lastErr - } else if !s.sniffing && s.buffer.Cap() != 0 { - // We don't need the buffer anymore. - // Reset it to release the internal slice. - s.buffer = bytes.Buffer{} - } - - // If there is nothing more to return in the sniffed buffer, read from the - // source. - sn, sErr := s.source.Read(p) - if sn > 0 && s.sniffing { - s.lastErr = sErr - if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil { - return wn, wErr - } - } - return sn, sErr -} - -func (s *bufferedReader) reset(snif bool) { - s.sniffing = snif - s.bufferRead = 0 - s.bufferSize = s.buffer.Len() -} diff --git a/vendor/github.com/soheilhy/cmux/cmux.go b/vendor/github.com/soheilhy/cmux/cmux.go deleted file mode 100644 index 5ba921e72dc06..0000000000000 --- a/vendor/github.com/soheilhy/cmux/cmux.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cmux - -import ( - "errors" - "fmt" - "io" - "net" - "sync" - "time" -) - -// Matcher matches a connection based on its content. -type Matcher func(io.Reader) bool - -// MatchWriter is a match that can also write response (say to do handshake). -type MatchWriter func(io.Writer, io.Reader) bool - -// ErrorHandler handles an error and returns whether -// the mux should continue serving the listener. -type ErrorHandler func(error) bool - -var _ net.Error = ErrNotMatched{} - -// ErrNotMatched is returned whenever a connection is not matched by any of -// the matchers registered in the multiplexer. 
-type ErrNotMatched struct { - c net.Conn -} - -func (e ErrNotMatched) Error() string { - return fmt.Sprintf("mux: connection %v not matched by an matcher", - e.c.RemoteAddr()) -} - -// Temporary implements the net.Error interface. -func (e ErrNotMatched) Temporary() bool { return true } - -// Timeout implements the net.Error interface. -func (e ErrNotMatched) Timeout() bool { return false } - -type errListenerClosed string - -func (e errListenerClosed) Error() string { return string(e) } -func (e errListenerClosed) Temporary() bool { return false } -func (e errListenerClosed) Timeout() bool { return false } - -// ErrListenerClosed is returned from muxListener.Accept when the underlying -// listener is closed. -var ErrListenerClosed = errListenerClosed("mux: listener closed") - -// ErrServerClosed is returned from muxListener.Accept when mux server is closed. -var ErrServerClosed = errors.New("mux: server closed") - -// for readability of readTimeout -var noTimeout time.Duration - -// New instantiates a new connection multiplexer. -func New(l net.Listener) CMux { - return &cMux{ - root: l, - bufLen: 1024, - errh: func(_ error) bool { return true }, - donec: make(chan struct{}), - readTimeout: noTimeout, - } -} - -// CMux is a multiplexer for network connections. -type CMux interface { - // Match returns a net.Listener that sees (i.e., accepts) only - // the connections matched by at least one of the matcher. - // - // The order used to call Match determines the priority of matchers. - Match(...Matcher) net.Listener - // MatchWithWriters returns a net.Listener that accepts only the - // connections that matched by at least of the matcher writers. - // - // Prefer Matchers over MatchWriters, since the latter can write on the - // connection before the actual handler. - // - // The order used to call Match determines the priority of matchers. - MatchWithWriters(...MatchWriter) net.Listener - // Serve starts multiplexing the listener. Serve blocks and perhaps - // should be invoked concurrently within a go routine. - Serve() error - // Closes cmux server and stops accepting any connections on listener - Close() - // HandleError registers an error handler that handles listener errors. - HandleError(ErrorHandler) - // sets a timeout for the read of matchers - SetReadTimeout(time.Duration) -} - -type matchersListener struct { - ss []MatchWriter - l muxListener -} - -type cMux struct { - root net.Listener - bufLen int - errh ErrorHandler - sls []matchersListener - readTimeout time.Duration - donec chan struct{} - mu sync.Mutex -} - -func matchersToMatchWriters(matchers []Matcher) []MatchWriter { - mws := make([]MatchWriter, 0, len(matchers)) - for _, m := range matchers { - cm := m - mws = append(mws, func(w io.Writer, r io.Reader) bool { - return cm(r) - }) - } - return mws -} - -func (m *cMux) Match(matchers ...Matcher) net.Listener { - mws := matchersToMatchWriters(matchers) - return m.MatchWithWriters(mws...) -} - -func (m *cMux) MatchWithWriters(matchers ...MatchWriter) net.Listener { - ml := muxListener{ - Listener: m.root, - connc: make(chan net.Conn, m.bufLen), - donec: make(chan struct{}), - } - m.sls = append(m.sls, matchersListener{ss: matchers, l: ml}) - return ml -} - -func (m *cMux) SetReadTimeout(t time.Duration) { - m.readTimeout = t -} - -func (m *cMux) Serve() error { - var wg sync.WaitGroup - - defer func() { - m.closeDoneChans() - wg.Wait() - - for _, sl := range m.sls { - close(sl.l.connc) - // Drain the connections enqueued for the listener. 
- for c := range sl.l.connc { - _ = c.Close() - } - } - }() - - for { - c, err := m.root.Accept() - if err != nil { - if !m.handleErr(err) { - return err - } - continue - } - - wg.Add(1) - go m.serve(c, m.donec, &wg) - } -} - -func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) { - defer wg.Done() - - muc := newMuxConn(c) - if m.readTimeout > noTimeout { - _ = c.SetReadDeadline(time.Now().Add(m.readTimeout)) - } - for _, sl := range m.sls { - for _, s := range sl.ss { - matched := s(muc.Conn, muc.startSniffing()) - if matched { - muc.doneSniffing() - if m.readTimeout > noTimeout { - _ = c.SetReadDeadline(time.Time{}) - } - select { - case sl.l.connc <- muc: - case <-donec: - _ = c.Close() - } - return - } - } - } - - _ = c.Close() - err := ErrNotMatched{c: c} - if !m.handleErr(err) { - _ = m.root.Close() - } -} - -func (m *cMux) Close() { - m.closeDoneChans() -} - -func (m *cMux) closeDoneChans() { - m.mu.Lock() - defer m.mu.Unlock() - - select { - case <-m.donec: - // Already closed. Don't close again - default: - close(m.donec) - } - for _, sl := range m.sls { - select { - case <-sl.l.donec: - // Already closed. Don't close again - default: - close(sl.l.donec) - } - } -} - -func (m *cMux) HandleError(h ErrorHandler) { - m.errh = h -} - -func (m *cMux) handleErr(err error) bool { - if !m.errh(err) { - return false - } - - if ne, ok := err.(net.Error); ok { - return ne.Temporary() - } - - return false -} - -type muxListener struct { - net.Listener - connc chan net.Conn - donec chan struct{} -} - -func (l muxListener) Accept() (net.Conn, error) { - select { - case c, ok := <-l.connc: - if !ok { - return nil, ErrListenerClosed - } - return c, nil - case <-l.donec: - return nil, ErrServerClosed - } -} - -// MuxConn wraps a net.Conn and provides transparent sniffing of connection data. -type MuxConn struct { - net.Conn - buf bufferedReader -} - -func newMuxConn(c net.Conn) *MuxConn { - return &MuxConn{ - Conn: c, - buf: bufferedReader{source: c}, - } -} - -// From the io.Reader documentation: -// -// When Read encounters an error or end-of-file condition after -// successfully reading n > 0 bytes, it returns the number of -// bytes read. It may return the (non-nil) error from the same call -// or return the error (and n == 0) from a subsequent call. -// An instance of this general case is that a Reader returning -// a non-zero number of bytes at the end of the input stream may -// return either err == EOF or err == nil. The next Read should -// return 0, EOF. -func (m *MuxConn) Read(p []byte) (int, error) { - return m.buf.Read(p) -} - -func (m *MuxConn) startSniffing() io.Reader { - m.buf.reset(true) - return &m.buf -} - -func (m *MuxConn) doneSniffing() { - m.buf.reset(false) -} diff --git a/vendor/github.com/soheilhy/cmux/doc.go b/vendor/github.com/soheilhy/cmux/doc.go deleted file mode 100644 index aaa8f3158998e..0000000000000 --- a/vendor/github.com/soheilhy/cmux/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. 
See the License for the specific language governing -// permissions and limitations under the License. - -// Package cmux is a library to multiplex network connections based on -// their payload. Using cmux, you can serve different protocols from the -// same listener. -package cmux diff --git a/vendor/github.com/soheilhy/cmux/matchers.go b/vendor/github.com/soheilhy/cmux/matchers.go deleted file mode 100644 index 878ae98cc3cc5..0000000000000 --- a/vendor/github.com/soheilhy/cmux/matchers.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cmux - -import ( - "bufio" - "crypto/tls" - "io" - "io/ioutil" - "net/http" - "strings" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -// Any is a Matcher that matches any connection. -func Any() Matcher { - return func(r io.Reader) bool { return true } -} - -// PrefixMatcher returns a matcher that matches a connection if it -// starts with any of the strings in strs. -func PrefixMatcher(strs ...string) Matcher { - pt := newPatriciaTreeString(strs...) - return pt.matchPrefix -} - -func prefixByteMatcher(list ...[]byte) Matcher { - pt := newPatriciaTree(list...) - return pt.matchPrefix -} - -var defaultHTTPMethods = []string{ - "OPTIONS", - "GET", - "HEAD", - "POST", - "PUT", - "DELETE", - "TRACE", - "CONNECT", -} - -// HTTP1Fast only matches the methods in the HTTP request. -// -// This matcher is very optimistic: if it returns true, it does not mean that -// the request is a valid HTTP response. If you want a correct but slower HTTP1 -// matcher, use HTTP1 instead. -func HTTP1Fast(extMethods ...string) Matcher { - return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...) -} - -// TLS matches HTTPS requests. -// -// By default, any TLS handshake packet is matched. An optional whitelist -// of versions can be passed in to restrict the matcher, for example: -// TLS(tls.VersionTLS11, tls.VersionTLS12) -func TLS(versions ...int) Matcher { - if len(versions) == 0 { - versions = []int{ - tls.VersionSSL30, - tls.VersionTLS10, - tls.VersionTLS11, - tls.VersionTLS12, - } - } - prefixes := [][]byte{} - for _, v := range versions { - prefixes = append(prefixes, []byte{22, byte(v >> 8 & 0xff), byte(v & 0xff)}) - } - return prefixByteMatcher(prefixes...) -} - -const maxHTTPRead = 4096 - -// HTTP1 parses the first line or upto 4096 bytes of the request to see if -// the conection contains an HTTP request. -func HTTP1() Matcher { - return func(r io.Reader) bool { - br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead}) - l, part, err := br.ReadLine() - if err != nil || part { - return false - } - - _, _, proto, ok := parseRequestLine(string(l)) - if !ok { - return false - } - - v, _, ok := http.ParseHTTPVersion(proto) - return ok && v == 1 - } -} - -// grabbed from net/http. 
-func parseRequestLine(line string) (method, uri, proto string, ok bool) { - s1 := strings.Index(line, " ") - s2 := strings.Index(line[s1+1:], " ") - if s1 < 0 || s2 < 0 { - return - } - s2 += s1 + 1 - return line[:s1], line[s1+1 : s2], line[s2+1:], true -} - -// HTTP2 parses the frame header of the first frame to detect whether the -// connection is an HTTP2 connection. -func HTTP2() Matcher { - return hasHTTP2Preface -} - -// HTTP1HeaderField returns a matcher matching the header fields of the first -// request of an HTTP 1 connection. -func HTTP1HeaderField(name, value string) Matcher { - return func(r io.Reader) bool { - return matchHTTP1Field(r, name, func(gotValue string) bool { - return gotValue == value - }) - } -} - -// HTTP1HeaderFieldPrefix returns a matcher matching the header fields of the -// first request of an HTTP 1 connection. If the header with key name has a -// value prefixed with valuePrefix, this will match. -func HTTP1HeaderFieldPrefix(name, valuePrefix string) Matcher { - return func(r io.Reader) bool { - return matchHTTP1Field(r, name, func(gotValue string) bool { - return strings.HasPrefix(gotValue, valuePrefix) - }) - } -} - -// HTTP2HeaderField returns a matcher matching the header fields of the first -// headers frame. -func HTTP2HeaderField(name, value string) Matcher { - return func(r io.Reader) bool { - return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool { - return gotValue == value - }) - } -} - -// HTTP2HeaderFieldPrefix returns a matcher matching the header fields of the -// first headers frame. If the header with key name has a value prefixed with -// valuePrefix, this will match. -func HTTP2HeaderFieldPrefix(name, valuePrefix string) Matcher { - return func(r io.Reader) bool { - return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool { - return strings.HasPrefix(gotValue, valuePrefix) - }) - } -} - -// HTTP2MatchHeaderFieldSendSettings matches the header field and writes the -// settings to the server. Prefer HTTP2HeaderField over this one, if the client -// does not block on receiving a SETTING frame. -func HTTP2MatchHeaderFieldSendSettings(name, value string) MatchWriter { - return func(w io.Writer, r io.Reader) bool { - return matchHTTP2Field(w, r, name, func(gotValue string) bool { - return gotValue == value - }) - } -} - -// HTTP2MatchHeaderFieldPrefixSendSettings matches the header field prefix -// and writes the settings to the server. Prefer HTTP2HeaderFieldPrefix over -// this one, if the client does not block on receiving a SETTING frame. 
-func HTTP2MatchHeaderFieldPrefixSendSettings(name, valuePrefix string) MatchWriter { - return func(w io.Writer, r io.Reader) bool { - return matchHTTP2Field(w, r, name, func(gotValue string) bool { - return strings.HasPrefix(gotValue, valuePrefix) - }) - } -} - -func hasHTTP2Preface(r io.Reader) bool { - var b [len(http2.ClientPreface)]byte - last := 0 - - for { - n, err := r.Read(b[last:]) - if err != nil { - return false - } - - last += n - eq := string(b[:last]) == http2.ClientPreface[:last] - if last == len(http2.ClientPreface) { - return eq - } - if !eq { - return false - } - } -} - -func matchHTTP1Field(r io.Reader, name string, matches func(string) bool) (matched bool) { - req, err := http.ReadRequest(bufio.NewReader(r)) - if err != nil { - return false - } - - return matches(req.Header.Get(name)) -} - -func matchHTTP2Field(w io.Writer, r io.Reader, name string, matches func(string) bool) (matched bool) { - if !hasHTTP2Preface(r) { - return false - } - - done := false - framer := http2.NewFramer(w, r) - hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) { - if hf.Name == name { - done = true - if matches(hf.Value) { - matched = true - } - } - }) - for { - f, err := framer.ReadFrame() - if err != nil { - return false - } - - switch f := f.(type) { - case *http2.SettingsFrame: - // Sender acknoweldged the SETTINGS frame. No need to write - // SETTINGS again. - if f.IsAck() { - break - } - if err := framer.WriteSettings(); err != nil { - return false - } - case *http2.ContinuationFrame: - if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil { - return false - } - done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 - case *http2.HeadersFrame: - if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil { - return false - } - done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 - } - - if done { - return matched - } - } -} diff --git a/vendor/github.com/soheilhy/cmux/patricia.go b/vendor/github.com/soheilhy/cmux/patricia.go deleted file mode 100644 index c3e3d85bdeaf0..0000000000000 --- a/vendor/github.com/soheilhy/cmux/patricia.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cmux - -import ( - "bytes" - "io" -) - -// patriciaTree is a simple patricia tree that handles []byte instead of string -// and cannot be changed after instantiation. -type patriciaTree struct { - root *ptNode - maxDepth int // max depth of the tree. -} - -func newPatriciaTree(bs ...[]byte) *patriciaTree { - max := 0 - for _, b := range bs { - if max < len(b) { - max = len(b) - } - } - return &patriciaTree{ - root: newNode(bs), - maxDepth: max + 1, - } -} - -func newPatriciaTreeString(strs ...string) *patriciaTree { - b := make([][]byte, len(strs)) - for i, s := range strs { - b[i] = []byte(s) - } - return newPatriciaTree(b...) 
-} - -func (t *patriciaTree) matchPrefix(r io.Reader) bool { - buf := make([]byte, t.maxDepth) - n, _ := io.ReadFull(r, buf) - return t.root.match(buf[:n], true) -} - -func (t *patriciaTree) match(r io.Reader) bool { - buf := make([]byte, t.maxDepth) - n, _ := io.ReadFull(r, buf) - return t.root.match(buf[:n], false) -} - -type ptNode struct { - prefix []byte - next map[byte]*ptNode - terminal bool -} - -func newNode(strs [][]byte) *ptNode { - if len(strs) == 0 { - return &ptNode{ - prefix: []byte{}, - terminal: true, - } - } - - if len(strs) == 1 { - return &ptNode{ - prefix: strs[0], - terminal: true, - } - } - - p, strs := splitPrefix(strs) - n := &ptNode{ - prefix: p, - } - - nexts := make(map[byte][][]byte) - for _, s := range strs { - if len(s) == 0 { - n.terminal = true - continue - } - nexts[s[0]] = append(nexts[s[0]], s[1:]) - } - - n.next = make(map[byte]*ptNode) - for first, rests := range nexts { - n.next[first] = newNode(rests) - } - - return n -} - -func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) { - if len(bss) == 0 || len(bss[0]) == 0 { - return prefix, bss - } - - if len(bss) == 1 { - return bss[0], [][]byte{{}} - } - - for i := 0; ; i++ { - var cur byte - eq := true - for j, b := range bss { - if len(b) <= i { - eq = false - break - } - - if j == 0 { - cur = b[i] - continue - } - - if cur != b[i] { - eq = false - break - } - } - - if !eq { - break - } - - prefix = append(prefix, cur) - } - - rest = make([][]byte, 0, len(bss)) - for _, b := range bss { - rest = append(rest, b[len(prefix):]) - } - - return prefix, rest -} - -func (n *ptNode) match(b []byte, prefix bool) bool { - l := len(n.prefix) - if l > 0 { - if l > len(b) { - l = len(b) - } - if !bytes.Equal(b[:l], n.prefix) { - return false - } - } - - if n.terminal && (prefix || len(n.prefix) == len(b)) { - return true - } - - if l >= len(b) { - return false - } - - nextN, ok := n.next[b[l]] - if !ok { - return false - } - - if l == len(b) { - b = b[l:l] - } else { - b = b[l+1:] - } - return nextN.match(b, prefix) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2992d1e44075e..08f33cb6c8345 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -850,7 +850,7 @@ github.com/gorilla/websocket # github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 ## explicit; go 1.17 github.com/grafana/cloudflare-go -# github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb +# github.com/grafana/dskit v0.0.0-20240305142548-5fcbd51bb6e4 ## explicit; go 1.20 github.com/grafana/dskit/aws github.com/grafana/dskit/backoff @@ -899,7 +899,7 @@ github.com/grafana/dskit/user # github.com/grafana/go-gelf/v2 v2.0.1 ## explicit; go 1.17 github.com/grafana/go-gelf/v2/gelf -# github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 +# github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 ## explicit; go 1.18 github.com/grafana/gomemcache/memcache # github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d @@ -1363,9 +1363,6 @@ github.com/shurcooL/vfsgen # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/soheilhy/cmux v0.1.5 -## explicit; go 1.11 -github.com/soheilhy/cmux # github.com/sony/gobreaker v0.5.0 ## explicit; go 1.12 github.com/sony/gobreaker
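
The spread-minimizing token generator changes above are API-visible: the constructor drops its log.Logger argument, parseInstanceID returns the instance prefix together with the numeric id (the regexp change keeps the trailing hyphen inside the prefix capture, so CanJoin can rebuild the predecessor as prefix + (id-1) without re-parsing the name), and generation failures are now returned as errors, escalated to a panic only at the GenerateTokens boundary. A minimal sketch of the new constructor; the instance and zone names are made up, and the GenerateTokens(requestedTokensCount, allTakenTokens) shape is assumed from the hunk above:

package main

import (
	"fmt"

	"github.com/grafana/dskit/ring"
)

func main() {
	// Hypothetical instance name: the regexp captures prefix "ingester-zone-a-"
	// (hyphen included) and id 5, so CanJoin checks "ingester-zone-a-4".
	gen, err := ring.NewSpreadMinimizingTokenGenerator(
		"ingester-zone-a-5",
		"zone-a",
		[]string{"zone-a", "zone-b", "zone-c"}, // spread-minimizing zones, sorted internally
		true,                                   // canJoinEnabled
	)
	if err != nil {
		panic(err)
	}

	// Panics only at this boundary if the optimal assignment is impossible;
	// the per-instance generation underneath now reports plain errors.
	tokens := gen.GenerateTokens(512, nil)
	fmt.Println("generated", len(tokens), "tokens")
}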
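
On the server side, removing cmux deletes the RouteHTTPToGRPC path outright, so gRPC traffic needs its own listener again, while the default middleware assembly moves out of newServer into the exported BuildHTTPMiddleware; the new -server.log-source-ips-full flag is carried in Config.LogSourceIPsFull. A sketch of calling the exported helper, assuming the caller already holds the router, *server.Metrics and logger:

package example

import (
	gokit_log "github.com/go-kit/log"
	"github.com/gorilla/mux"

	"github.com/grafana/dskit/middleware"
	"github.com/grafana/dskit/server"
)

// stockMiddleware rebuilds the default HTTP middleware stack, which callers
// previously could not obtain without duplicating newServer's body.
func stockMiddleware(cfg server.Config, router *mux.Router, metrics *server.Metrics, logger gokit_log.Logger) ([]middleware.Interface, error) {
	cfg.LogSourceIPs = true
	cfg.LogSourceIPsFull = true // new knob: log every source IP, not only the originating one
	return server.BuildHTTPMiddleware(cfg, router, metrics, logger)
}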
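
In spanlogger, the trace id is now logged under trace_id rather than traceID, and the new SetSpanAndLogTag tags the span and all subsequent log lines in one call; per its doc comment it must not be invoked from multiple goroutines at once. A usage sketch against an already-constructed SpanLogger (the tag name is hypothetical):

package example

import (
	"github.com/go-kit/log/level"

	"github.com/grafana/dskit/spanlogger"
)

// annotate attaches the tenant to the span and to every log line this
// SpanLogger emits afterwards.
func annotate(sl *spanlogger.SpanLogger, tenantID string) {
	sl.SetSpanAndLogTag("tenant", tenantID)

	// With a sampled trace, this line carries tenant=<id> and trace_id=<...>.
	level.Info(sl).Log("msg", "starting query")
}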
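
The user.ExtractFromGRPCRequest rewrite is behavior-preserving (exactly one org-ID metadata value is still required), but it now reads the single key via metadata.ValueFromIncomingContext instead of materializing the whole metadata map. A round-trip sketch; the "x-scope-orgid" literal is an assumption standing in for dskit's unexported lowerOrgIDHeaderName:

package example

import (
	"context"

	"github.com/grafana/dskit/user"
	"google.golang.org/grpc/metadata"
)

// roundTrip injects an org ID as incoming gRPC metadata and extracts it back.
func roundTrip() (string, error) {
	// Assumed header name, following the X-Scope-OrgID convention.
	md := metadata.Pairs("x-scope-orgid", "tenant-1")
	ctx := metadata.NewIncomingContext(context.Background(), md)

	orgID, _, err := user.ExtractFromGRPCRequest(ctx)
	return orgID, err
}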