diff --git a/go.mod b/go.mod
index e0bd67b1b7a..18a78d0496a 100644
--- a/go.mod
+++ b/go.mod
@@ -9,8 +9,8 @@ require (
github.com/containerd/containerd v1.7.11
github.com/go-git/go-git/v5 v5.10.0
github.com/google/go-cmp v0.6.0
- github.com/google/go-containerregistry v0.17.0
- github.com/google/uuid v1.4.0
+ github.com/google/go-containerregistry v0.19.1
+ github.com/google/uuid v1.6.0
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v1.0.2
@@ -24,8 +24,8 @@ require (
github.com/tektoncd/plumbing v0.0.0-20220817140952-3da8ce01aeeb
go.opencensus.io v0.24.0
go.uber.org/zap v1.26.0
- golang.org/x/exp v0.0.0-20230307190834-24139beb5833
- golang.org/x/oauth2 v0.15.0 // indirect
+ golang.org/x/exp v0.0.0-20230321023759-10a507213a29
+ golang.org/x/oauth2 v0.17.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0
gopkg.in/square/go-jose.v2 v2.6.0
k8s.io/api v0.27.1
@@ -52,14 +52,14 @@ require (
github.com/google/cel-go v0.18.1
github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230625233257-b8504803389b
github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa
- github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.6
- github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.6
- github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.6
- github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.6
- go.opentelemetry.io/otel v1.19.0
+ github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.2
+ github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.2
+ github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.2
+ github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.2
+ go.opentelemetry.io/otel v1.23.0
go.opentelemetry.io/otel/exporters/jaeger v1.17.0
- go.opentelemetry.io/otel/sdk v1.19.0
- go.opentelemetry.io/otel/trace v1.19.0
+ go.opentelemetry.io/otel/sdk v1.21.0
+ go.opentelemetry.io/otel/trace v1.23.0
k8s.io/utils v0.0.0-20230505201702-9f6742963106
)
@@ -78,20 +78,20 @@ require (
require (
cloud.google.com/go/compute/metadata v0.2.3 // indirect
- cloud.google.com/go/iam v1.1.5 // indirect
- cloud.google.com/go/kms v1.15.5 // indirect
+ cloud.google.com/go/iam v1.1.6 // indirect
+ cloud.google.com/go/kms v1.15.7 // indirect
dario.cat/mergo v1.0.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/kms v1.27.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/kms v1.29.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect
github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/cloudflare/circl v1.3.3 // indirect
github.com/containerd/log v0.1.0 // indirect
@@ -99,14 +99,15 @@ require (
github.com/davidmz/go-pageant v1.0.2 // indirect
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/fatih/color v1.13.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-fed/httpsig v1.1.0 // indirect
- github.com/go-jose/go-jose/v3 v3.0.1 // indirect
+ github.com/go-jose/go-jose/v3 v3.0.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.12.0 // indirect
+ github.com/googleapis/gax-go/v2 v2.12.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
@@ -115,29 +116,32 @@ require (
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/hashicorp/vault/api v1.10.0 // indirect
- github.com/jellydator/ttlcache/v3 v3.1.0 // indirect
+ github.com/hashicorp/vault/api v1.12.0 // indirect
+ github.com/jellydator/ttlcache/v3 v3.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
- github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
+ github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
github.com/mattn/go-colorable v0.1.9 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
- github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
+ github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
- github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
+ github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/skeema/knownhosts v1.2.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/zeebo/errs v1.3.0 // indirect
- go.opentelemetry.io/otel/metric v1.19.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect
+ go.opentelemetry.io/otel/metric v1.23.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect
+ gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
)
require (
- cloud.google.com/go/compute v1.23.3 // indirect
+ cloud.google.com/go/compute v1.23.4 // indirect
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
@@ -149,19 +153,19 @@ require (
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
- github.com/aws/aws-sdk-go-v2 v1.23.5 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.25.11 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.16.9 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.25.2 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.27.4 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11 // indirect
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 // indirect
- github.com/aws/smithy-go v1.18.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect
+ github.com/aws/smithy-go v1.20.1 // indirect
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -179,9 +183,9 @@ require (
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
- github.com/go-kit/log v0.2.0 // indirect
+ github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
- github.com/go-logr/logr v1.2.4 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
@@ -208,10 +212,10 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/openzipkin/zipkin-go v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_golang v1.14.0 // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/procfs v0.8.0 // indirect
+ github.com/prometheus/client_golang v1.15.1 // indirect
+ github.com/prometheus/client_model v0.4.0 // indirect
+ github.com/prometheus/common v0.42.0 // indirect
+ github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/statsd_exporter v0.21.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260 // indirect
@@ -223,20 +227,20 @@ require (
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/automaxprocs v1.4.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
- golang.org/x/crypto v0.17.0 // indirect
+ golang.org/x/crypto v0.20.0 // indirect
golang.org/x/mod v0.12.0 // indirect
- golang.org/x/net v0.19.0 // indirect
- golang.org/x/sync v0.5.0
- golang.org/x/sys v0.15.0 // indirect
- golang.org/x/term v0.15.0 // indirect
+ golang.org/x/net v0.21.0 // indirect
+ golang.org/x/sync v0.6.0
+ golang.org/x/sys v0.17.0 // indirect
+ golang.org/x/term v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.13.0 // indirect
- google.golang.org/api v0.152.0 // indirect
+ google.golang.org/api v0.167.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect
- google.golang.org/grpc v1.60.1
- google.golang.org/protobuf v1.31.0
+ google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect
+ google.golang.org/grpc v1.61.1
+ google.golang.org/protobuf v1.32.0
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 368a22b0a20..651372e4b51 100644
--- a/go.sum
+++ b/go.sum
@@ -20,24 +20,24 @@ cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECH
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
+cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
+cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
+cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
-cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
-cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM=
-cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI=
+cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
+cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM=
+cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -59,14 +59,14 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@@ -94,8 +94,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -153,51 +153,51 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.48.11 h1:9YbiSbaF/jWi+qLRl+J5dEhr2mcbDYHmKg2V7RBcD5M=
+github.com/aws/aws-sdk-go v1.50.25 h1:vhiHtLYybv1Nhx3Kv18BBC6L0aPJHaG9aeEsr92W99c=
github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
-github.com/aws/aws-sdk-go-v2 v1.23.5 h1:xK6C4udTyDMd82RFvNkDQxtAd00xlzFUtX4fF2nMZyg=
-github.com/aws/aws-sdk-go-v2 v1.23.5/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds=
+github.com/aws/aws-sdk-go-v2 v1.25.2 h1:/uiG1avJRgLGiQM9X3qJM8+Qa6KRGK5rRPuXE0HUM+w=
+github.com/aws/aws-sdk-go-v2 v1.25.2/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo=
github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4=
-github.com/aws/aws-sdk-go-v2/config v1.25.11 h1:RWzp7jhPRliIcACefGkKp03L0Yofmd2p8M25kbiyvno=
-github.com/aws/aws-sdk-go-v2/config v1.25.11/go.mod h1:BVUs0chMdygHsQtvaMyEOpW2GIW+ubrxJLgIz/JU29s=
+github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M=
+github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g=
github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.9 h1:LQo3MUIOzod9JdUK+wxmSdgzLVYUbII3jXn3S/HJZU0=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.9/go.mod h1:R7mDuIJoCjH6TxGUc/cylE7Lp/o0bhKVoxdBThsjqCM=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.4/go.mod h1:+30tpwrkOgvkJL1rUZuRLoxcJwtI/OkeBLYnHxJtVe0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 h1:FZVFahMyZle6WcogZCOxo6D/lkDA2lqKIn4/ueUmVXw=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9/go.mod h1:kjq7REMIkxdtcEC9/4BVXjOsNY5isz6jQbEgk6osRTU=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8 h1:8GVZIR0y6JRIUNSYI1xAMF4HDfV8H/bOsZ/8AD/uY5Q=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.8/go.mod h1:rwBfu0SoUkBUZndVgPZKAD9Y2JigaZtRP68unRiYToQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8 h1:ZE2ds/qeBkhk3yqYvS3CDCFNvd9ir5hMjlVStLZWrvM=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.8/go.mod h1:/lAPPymDYL023+TS6DJmjuL42nxix2AvEvfjqOBRODk=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11 h1:wlTgmb/sCmVRJrN5De3CiHj4v/bTCgL5+qpdEd0CPtw=
github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11/go.mod h1:Ce1q2jlNm8BVpjLaOnwnm5v2RClAbK6txwPljFzyW6c=
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2 h1:yflJrGmi1pXtP9lOpOeaNZyc0vXnJTuP2sor3nJcGGo=
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2/go.mod h1:uHtRE7aqXNmpeYL+7Ec7LacH5zC9+w2T5MBOeEKDdu0=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 h1:e3PCNeEaev/ZF01cQyNZgmYE9oYYePIMJs2mWSKG514=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3/go.mod h1:gIeeNyaL8tIEqZrzAnTeyhHcE0yysCtcaP+N9kxLZ+E=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8 h1:EamsKe+ZjkOQjDdHd86/JCEucjFKQ9T0atWKO4s2Lgs=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.8/go.mod h1:Q0vV3/csTpbkfKLI5Sb56cJQTCTtJ0ixdb7P+Wedqiw=
-github.com/aws/aws-sdk-go-v2/service/kms v1.27.2 h1:I0NiSQiZu1UzP0akJWXSacjckEpYdN4VN7XYYfW6EYs=
-github.com/aws/aws-sdk-go-v2/service/kms v1.27.2/go.mod h1:E2IzqbIZfYuYUgib2KxlaweBbkxHCb3ZIgnp85TjKic=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 h1:5ffmXjPtwRExp1zc7gENLgCPyHFbhEPwVTkTiH9niSk=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0=
+github.com/aws/aws-sdk-go-v2/service/kms v1.29.1 h1:OdjJjUWFlMZLAMl54ASxIpZdGEesY4BH3/c0HAPSFdI=
+github.com/aws/aws-sdk-go-v2/service/kms v1.29.1/go.mod h1:Cbx2uxEX0bAB7SlSY+ys05ZBkEb8IbmuAOcGVmDfJFs=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.2 h1:xJPydhNm0Hiqct5TVKEuHG7weC0+sOs4MUnd7A5n5F4=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.2/go.mod h1:zxk6y1X2KXThESWMS5CrKRvISD8mbIMab6nZrCGxDG0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2 h1:8dU9zqA77C5egbU6yd4hFLaiIdPv3rU+6cp7sz5FjCU=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.2/go.mod h1:7Lt5mjQ8x5rVdKqg+sKKDeuwoszDJIIPmkd8BVsEdS0=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.2 h1:fFrLsy08wEbAisqW3KDl/cPHrF43GmV79zXB9EwJiZw=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.2/go.mod h1:7Ld9eTqocTvJqqJ5K/orbSDwmGcpRdlDiLjz2DO+SL8=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
-github.com/aws/smithy-go v1.18.1 h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c=
-github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
+github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
+github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 h1:G5IT+PEpFY0CDb3oITDP9tkmLrHkVD8Ny+elUmBqVYI=
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7/go.mod h1:VVALgT1UESBh91dY0GprHnT1Z7mKd96VDk8qVy+bmu0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -231,7 +231,6 @@ github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMr
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
@@ -258,6 +257,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
@@ -438,17 +438,17 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
-github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
-github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
-github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
@@ -474,13 +474,13 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
-github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-jose/go-jose/v3 v3.0.2 h1:2Edjn8Nrb44UvTdp84KU0bBPs1cO7noRCybtS3eJEUQ=
+github.com/go-jose/go-jose/v3 v3.0.2/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw=
-github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -491,8 +491,8 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
@@ -536,8 +536,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
-github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
+github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -601,8 +601,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.6.0/go.mod h1:euCCtNbZ6tKqi1E72vwDj2xZcN5ttKpZLfa/wSo5iLw=
-github.com/google/go-containerregistry v0.17.0 h1:5p+zYs/R4VGHkhyvgWurWrpJ2hW4Vv9fQI+GzdcwXLk=
-github.com/google/go-containerregistry v0.17.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
+github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
+github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230625233257-b8504803389b h1:ptt4Cmxx6HsJQUSRp0LRB8nAxMdn9mxnqhc4dxwYlSM=
github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230625233257-b8504803389b/go.mod h1:Ek+8PQrShkA7aHEj3/zSW33wU0V/Bx3zW/gFh7l21xY=
github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa h1:+MG+Q2Q7mtW6kCIbUPZ9ZMrj7xOWDKI1hhy1qp0ygI0=
@@ -638,14 +638,14 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
-github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
+github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM=
+github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@@ -716,10 +716,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ=
-github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8=
-github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
-github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
+github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4=
+github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -739,8 +737,8 @@ github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/U
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
-github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g=
-github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
+github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE=
+github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
github.com/jenkins-x/go-scm v1.14.24 h1:m6btHeg/DUXNJeRa4ySAgwmwCEY7s1OjSkdK6Y1DFKI=
github.com/jenkins-x/go-scm v1.14.24/go.mod h1:lKqSATMN9YB1Dbl8s8tzaUf5AFlAd+JRFipu2M6Ut2Y=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -750,7 +748,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -799,8 +797,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8=
-github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA=
+github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo=
+github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -943,8 +941,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -962,16 +960,15 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
+github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -980,9 +977,8 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
+github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -994,9 +990,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8=
github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
@@ -1018,8 +1013,8 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
-github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
+github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
+github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -1030,14 +1025,14 @@ github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5K
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
-github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.6 h1:WzZExOcFanrFfCi7SUgkBtJicWnSNziBD9nSSQIrqhc=
-github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.6/go.mod h1:3zOHOLHnCE6EXyVH+6Z/lC9O1RDsbmR045NQ1DogiHw=
-github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.6 h1:wsPt9kNXF1ZZyae2wO35NLsK+cjWqPGpuPaDdXzRe0g=
-github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.6/go.mod h1:LH+ct6D77J8Ks6PXijMYYhmlQ1mbqKHbmy7+Sw5/Woc=
-github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.6 h1:aMVT9XXFQEnBtJ6szzanyAdKT5gFK4emN+jLSlFlOso=
-github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.6/go.mod h1:Hwhlx8JSZJF1R27JlwW/Bl2h40reG3MfKANREtBI0L8=
-github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.6 h1:TdSHzcFtPJxbk4B+huWC6GDq7OpgHmLg18inRo9u70I=
-github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.6/go.mod h1:/l/PzSbTOuIAtglOwUdlzzYvjIZ2WyaBpt5722JTmLY=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.2 h1:e0EtUcE7cqWBxxME7h6upA3EA0IR3EOE3F1t+WHOdTc=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.2/go.mod h1:07qBxPjI9bsgdQRiBz27Ai+gl6hgr//vwXMZzTX87Us=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.2 h1:Fgt4dC9OozkLEtMO6JYfFgqNdSDG1y1uAdiJgrtZYN4=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.2/go.mod h1:BT+jh/GK55djPRHqTYu937eq29Zzusf1t0qVbrcn4Aw=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.2 h1:aX6hLH5v3JdOQJJ6+uCMmeDjcwyfQMLmXKJVl6HtzAg=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.2/go.mod h1:OEFPub6XKsX6Fl/PpeIpQTsukG3I0CFWb9saHINV72U=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.2 h1:hRC8sGPQtnTcoOqWbCNAvLpW1pHL4CQl7FT55IrEof8=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.2/go.mod h1:frWJBbYRRHnbLE9h1fH349Mde84NZh6hDrnKqhPgMNU=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -1135,8 +1130,6 @@ github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:tw
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
-github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
@@ -1180,16 +1173,20 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
-go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA=
+go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E=
+go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI=
-go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
-go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
-go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
-go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
-go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
-go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
+go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo=
+go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo=
+go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
+go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
+go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI=
+go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -1219,7 +1216,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -1238,8 +1234,9 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
+golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1250,8 +1247,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s=
-golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
+golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1349,8 +1346,9 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1365,8 +1363,8 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
-golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
+golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
+golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1380,8 +1378,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1470,12 +1468,10 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1488,16 +1484,18 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1511,6 +1509,7 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1617,8 +1616,8 @@ google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBz
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.152.0 h1:t0r1vPnfMc260S2Ci+en7kfCZaLOPs5KI0sVV/6jZrY=
-google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
+google.golang.org/api v0.167.0 h1:CKHrQD1BLRii6xdkatBDXyKzM0mkawt2QP+H3LtPmSE=
+google.golang.org/api v0.167.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1677,12 +1676,12 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
-google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
-google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
-google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc=
+google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU=
+google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M=
+google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A=
+google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1712,8 +1711,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
-google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY=
+google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1730,11 +1729,10 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1746,6 +1744,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U=
+gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go
index 540ad16ac49..27a1970b9d8 100644
--- a/vendor/cloud.google.com/go/compute/internal/version.go
+++ b/vendor/cloud.google.com/go/compute/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.23.3"
+const Version = "1.23.4"
diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md
index c4cacb03f88..43a17938486 100644
--- a/vendor/cloud.google.com/go/iam/CHANGES.md
+++ b/vendor/cloud.google.com/go/iam/CHANGES.md
@@ -1,6 +1,13 @@
# Changes
+## [1.1.6](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.5...iam/v1.1.6) (2024-01-30)
+
+
+### Bug Fixes
+
+* **iam:** Enable universe domain resolution options ([fd1d569](https://github.com/googleapis/google-cloud-go/commit/fd1d56930fa8a747be35a224611f4797b8aeb698))
+
## [1.1.5](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.4...iam/v1.1.5) (2023-11-01)
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
index 85346a891df..b5243e61291 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
+// protoc-gen-go v1.32.0
// protoc v4.23.2
// source: google/iam/v1/iam_policy.proto
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
index 68f8d761f7f..3f854fe496e 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
+// protoc-gen-go v1.32.0
// protoc v4.23.2
// source: google/iam/v1/options.proto
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
index eefd1d0e546..dfc60661a30 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
+// protoc-gen-go v1.32.0
// protoc v4.23.2
// source: google/iam/v1/policy.proto
@@ -289,11 +289,11 @@ type Policy struct {
// Any operation that affects conditional role bindings must specify version
// `3`. This requirement applies to the following operations:
//
- // * Getting a policy that includes a conditional role binding
- // * Adding a conditional role binding to a policy
- // * Changing a conditional role binding in a policy
- // * Removing any role binding, with or without a condition, from a policy
- // that includes conditions
+ // - Getting a policy that includes a conditional role binding
+ // - Adding a conditional role binding to a policy
+ // - Changing a conditional role binding in a policy
+ // - Removing any role binding, with or without a condition, from a policy
+ // that includes conditions
//
// **Important:** If you use IAM Conditions, you must include the `etag` field
// whenever you call `setIamPolicy`. If you omit this field, then IAM allows
@@ -407,47 +407,43 @@ type Binding struct {
// Specifies the principals requesting access for a Google Cloud resource.
// `members` can have the following values:
//
- // * `allUsers`: A special identifier that represents anyone who is
- // on the internet; with or without a Google account.
+ // - `allUsers`: A special identifier that represents anyone who is
+ // on the internet; with or without a Google account.
//
- // * `allAuthenticatedUsers`: A special identifier that represents anyone
- // who is authenticated with a Google account or a service account.
+ // - `allAuthenticatedUsers`: A special identifier that represents anyone
+ // who is authenticated with a Google account or a service account.
//
- // * `user:{emailid}`: An email address that represents a specific Google
- // account. For example, `alice@example.com` .
+ // - `user:{emailid}`: An email address that represents a specific Google
+ // account. For example, `alice@example.com` .
//
+ // - `serviceAccount:{emailid}`: An email address that represents a service
+ // account. For example, `my-other-app@appspot.gserviceaccount.com`.
//
- // * `serviceAccount:{emailid}`: An email address that represents a service
- // account. For example, `my-other-app@appspot.gserviceaccount.com`.
+ // - `group:{emailid}`: An email address that represents a Google group.
+ // For example, `admins@example.com`.
//
- // * `group:{emailid}`: An email address that represents a Google group.
- // For example, `admins@example.com`.
+ // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+ // identifier) representing a user that has been recently deleted. For
+ // example, `alice@example.com?uid=123456789012345678901`. If the user is
+ // recovered, this value reverts to `user:{emailid}` and the recovered user
+ // retains the role in the binding.
//
- // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
- // identifier) representing a user that has been recently deleted. For
- // example, `alice@example.com?uid=123456789012345678901`. If the user is
- // recovered, this value reverts to `user:{emailid}` and the recovered user
- // retains the role in the binding.
- //
- // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
- // unique identifier) representing a service account that has been recently
- // deleted. For example,
- // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
- // If the service account is undeleted, this value reverts to
- // `serviceAccount:{emailid}` and the undeleted service account retains the
- // role in the binding.
- //
- // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
- // identifier) representing a Google group that has been recently
- // deleted. For example, `admins@example.com?uid=123456789012345678901`. If
- // the group is recovered, this value reverts to `group:{emailid}` and the
- // recovered group retains the role in the binding.
- //
- //
- // * `domain:{domain}`: The G Suite domain (primary) that represents all the
- // users of that domain. For example, `google.com` or `example.com`.
+ // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+ // unique identifier) representing a service account that has been recently
+ // deleted. For example,
+ // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+ // If the service account is undeleted, this value reverts to
+ // `serviceAccount:{emailid}` and the undeleted service account retains the
+ // role in the binding.
//
+ // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+ // identifier) representing a Google group that has been recently
+ // deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+ // the group is recovered, this value reverts to `group:{emailid}` and the
+ // recovered group retains the role in the binding.
//
+ // - `domain:{domain}`: The G Suite domain (primary) that represents all the
+ // users of that domain. For example, `google.com` or `example.com`.
Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
// The condition that is associated with this binding.
//
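For readers of this hunk: the Members documentation above enumerates the accepted principal formats. Below is a minimal, illustrative sketch (not part of the patch) of building a policy with these generated types; the role string and email addresses are placeholders, and the Policy/Binding field names are taken from this package's generated code.

package main

import (
	"fmt"

	"cloud.google.com/go/iam/apiv1/iampb"
)

func main() {
	// Version 3 is required whenever a binding carries a condition; it is
	// harmless for unconditional bindings like this one.
	policy := &iampb.Policy{
		Version: 3,
		Bindings: []*iampb.Binding{
			{
				Role: "roles/viewer", // placeholder role
				Members: []string{
					// Two of the member formats documented above.
					"user:alice@example.com",
					"serviceAccount:my-other-app@appspot.gserviceaccount.com",
				},
			},
		},
	}
	fmt.Println(policy.GetBindings()[0].GetMembers())
}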
diff --git a/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go
new file mode 100644
index 00000000000..49c301e3375
--- /dev/null
+++ b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go
@@ -0,0 +1,305 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package kms
+
+import (
+ kmspb "cloud.google.com/go/kms/apiv1/kmspb"
+ "google.golang.org/api/iterator"
+ locationpb "google.golang.org/genproto/googleapis/cloud/location"
+)
+
+// CryptoKeyIterator manages a stream of *kmspb.CryptoKey.
+type CryptoKeyIterator struct {
+ items []*kmspb.CryptoKey
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKey, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *CryptoKeyIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *CryptoKeyIterator) Next() (*kmspb.CryptoKey, error) {
+ var item *kmspb.CryptoKey
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *CryptoKeyIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *CryptoKeyIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// CryptoKeyVersionIterator manages a stream of *kmspb.CryptoKeyVersion.
+type CryptoKeyVersionIterator struct {
+ items []*kmspb.CryptoKeyVersion
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKeyVersion, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *CryptoKeyVersionIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *CryptoKeyVersionIterator) Next() (*kmspb.CryptoKeyVersion, error) {
+ var item *kmspb.CryptoKeyVersion
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *CryptoKeyVersionIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *CryptoKeyVersionIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// EkmConnectionIterator manages a stream of *kmspb.EkmConnection.
+type EkmConnectionIterator struct {
+ items []*kmspb.EkmConnection
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.EkmConnection, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *EkmConnectionIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *EkmConnectionIterator) Next() (*kmspb.EkmConnection, error) {
+ var item *kmspb.EkmConnection
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *EkmConnectionIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *EkmConnectionIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// ImportJobIterator manages a stream of *kmspb.ImportJob.
+type ImportJobIterator struct {
+ items []*kmspb.ImportJob
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.ImportJob, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *ImportJobIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *ImportJobIterator) Next() (*kmspb.ImportJob, error) {
+ var item *kmspb.ImportJob
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *ImportJobIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *ImportJobIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// KeyRingIterator manages a stream of *kmspb.KeyRing.
+type KeyRingIterator struct {
+ items []*kmspb.KeyRing
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyRing, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *KeyRingIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *KeyRingIterator) Next() (*kmspb.KeyRing, error) {
+ var item *kmspb.KeyRing
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *KeyRingIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *KeyRingIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// LocationIterator manages a stream of *locationpb.Location.
+type LocationIterator struct {
+ items []*locationpb.Location
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*locationpb.Location, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *LocationIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LocationIterator) Next() (*locationpb.Location, error) {
+ var item *locationpb.Location
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *LocationIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *LocationIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
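The iterator types added in auxiliary.go above follow the Next/iterator.Done contract described in their comments (the same pattern the old doc.go example removed below relied on). A minimal caller-side sketch, assuming a KeyManagementClient whose ListCryptoKeys returns a *CryptoKeyIterator as in this package; the parent resource name is a placeholder.

package main

import (
	"context"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	"cloud.google.com/go/kms/apiv1/kmspb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	c, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Next yields items until it returns iterator.Done, per the contract
	// documented on the iterator types above.
	it := c.ListCryptoKeys(ctx, &kmspb.ListCryptoKeysRequest{
		Parent: "projects/my-project/locations/global/keyRings/my-ring", // placeholder
	})
	for {
		key, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Println(key.GetName())
	}
}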
diff --git a/vendor/cloud.google.com/go/kms/apiv1/doc.go b/vendor/cloud.google.com/go/kms/apiv1/doc.go
index f5fad9615c1..b20d1ac4014 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/doc.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -68,22 +68,16 @@
// }
// defer c.Close()
//
-// req := &kmspb.ListEkmConnectionsRequest{
+// req := &kmspb.CreateEkmConnectionRequest{
// // TODO: Fill request struct fields.
-// // See https://pkg.go.dev/cloud.google.com/go/kms/apiv1/kmspb#ListEkmConnectionsRequest.
+// // See https://pkg.go.dev/cloud.google.com/go/kms/apiv1/kmspb#CreateEkmConnectionRequest.
// }
-// it := c.ListEkmConnections(ctx, req)
-// for {
-// resp, err := it.Next()
-// if err == iterator.Done {
-// break
-// }
-// if err != nil {
-// // TODO: Handle error.
-// }
-// // TODO: Use resp.
-// _ = resp
+// resp, err := c.CreateEkmConnection(ctx, req)
+// if err != nil {
+// // TODO: Handle error.
// }
+// // TODO: Use resp.
+// _ = resp
//
// # Use of Context
//
diff --git a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
index 2cf9f965289..1e8d2ffe0a0 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -63,7 +63,9 @@ type EkmCallOptions struct {
func defaultEkmGRPCClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"),
internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
@@ -463,7 +465,9 @@ func NewEkmRESTClient(ctx context.Context, opts ...option.ClientOption) (*EkmCli
func defaultEkmRESTClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"),
+ internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"),
internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
}
@@ -1620,97 +1624,3 @@ func (c *ekmRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestI
}
return resp, nil
}
-
-// EkmConnectionIterator manages a stream of *kmspb.EkmConnection.
-type EkmConnectionIterator struct {
- items []*kmspb.EkmConnection
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.EkmConnection, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *EkmConnectionIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *EkmConnectionIterator) Next() (*kmspb.EkmConnection, error) {
- var item *kmspb.EkmConnection
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *EkmConnectionIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *EkmConnectionIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// LocationIterator manages a stream of *locationpb.Location.
-type LocationIterator struct {
- items []*locationpb.Location
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*locationpb.Location, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *LocationIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *LocationIterator) Next() (*locationpb.Location, error) {
- var item *locationpb.Location
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *LocationIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *LocationIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
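The WithDefaultEndpointTemplate / WithDefaultUniverseDomain defaults added above let the cloudkms endpoint resolve against a configurable universe domain. A hedged sketch of overriding that domain at client construction; it assumes the google.golang.org/api version pinned by this change exposes option.WithUniverseDomain, and "example.test" is a placeholder domain.

package main

import (
	"context"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// With the endpoint template above, the client would then target
	// cloudkms.example.test:443 instead of cloudkms.googleapis.com:443.
	c, err := kms.NewEkmClient(ctx, option.WithUniverseDomain("example.test"))
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
}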
diff --git a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
index 92293dc2957..2f71ee36c77 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -84,7 +84,9 @@ type KeyManagementCallOptions struct {
func defaultKeyManagementGRPCClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"),
internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
@@ -1241,7 +1243,9 @@ func NewKeyManagementRESTClient(ctx context.Context, opts ...option.ClientOption
func defaultKeyManagementRESTClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"),
+ internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"),
internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
}
@@ -4429,191 +4433,3 @@ func (c *keyManagementRESTClient) TestIamPermissions(ctx context.Context, req *i
}
return resp, nil
}
-
-// CryptoKeyIterator manages a stream of *kmspb.CryptoKey.
-type CryptoKeyIterator struct {
- items []*kmspb.CryptoKey
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKey, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *CryptoKeyIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *CryptoKeyIterator) Next() (*kmspb.CryptoKey, error) {
- var item *kmspb.CryptoKey
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *CryptoKeyIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *CryptoKeyIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// CryptoKeyVersionIterator manages a stream of *kmspb.CryptoKeyVersion.
-type CryptoKeyVersionIterator struct {
- items []*kmspb.CryptoKeyVersion
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKeyVersion, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *CryptoKeyVersionIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *CryptoKeyVersionIterator) Next() (*kmspb.CryptoKeyVersion, error) {
- var item *kmspb.CryptoKeyVersion
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *CryptoKeyVersionIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *CryptoKeyVersionIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// ImportJobIterator manages a stream of *kmspb.ImportJob.
-type ImportJobIterator struct {
- items []*kmspb.ImportJob
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.ImportJob, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *ImportJobIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *ImportJobIterator) Next() (*kmspb.ImportJob, error) {
- var item *kmspb.ImportJob
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *ImportJobIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *ImportJobIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// KeyRingIterator manages a stream of *kmspb.KeyRing.
-type KeyRingIterator struct {
- items []*kmspb.KeyRing
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyRing, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *KeyRingIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *KeyRingIterator) Next() (*kmspb.KeyRing, error) {
- var item *kmspb.KeyRing
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *KeyRingIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *KeyRingIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go
index fdc98c0846e..632ca94ab52 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.32.0
+// protoc v4.25.2
// source: google/cloud/kms/v1/ekm_service.proto
package kmspb
@@ -57,12 +57,13 @@ const (
// * When creating a
// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] associated with
// this
- // [EkmConnection][google.cloud.kms.v1.EkmConnection], the caller must
- // supply the key path of pre-existing external key material that will be
- // linked to the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
- // * Destruction of external key material cannot be requested via the
- // Cloud KMS API and must be performed directly in the EKM.
- // * Automatic rotation of key material is not supported.
+ //
+ // [EkmConnection][google.cloud.kms.v1.EkmConnection], the caller must
+ // supply the key path of pre-existing external key material that will be
+ // linked to the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
+ // - Destruction of external key material cannot be requested via the
+ // Cloud KMS API and must be performed directly in the EKM.
+ // - Automatic rotation of key material is not supported.
EkmConnection_MANUAL EkmConnection_KeyManagementMode = 1
// All [CryptoKeys][google.cloud.kms.v1.CryptoKey] created with this
// [EkmConnection][google.cloud.kms.v1.EkmConnection] use EKM-side key
@@ -70,13 +71,14 @@ const (
// * When a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]
// associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection]
// is
- // created, the EKM automatically generates new key material and a new
- // key path. The caller cannot supply the key path of pre-existing
- // external key material.
- // * Destruction of external key material associated with this
- // [EkmConnection][google.cloud.kms.v1.EkmConnection] can be requested by
- // calling [DestroyCryptoKeyVersion][EkmService.DestroyCryptoKeyVersion].
- // * Automatic rotation of key material is supported.
+ //
+ // created, the EKM automatically generates new key material and a new
+ // key path. The caller cannot supply the key path of pre-existing
+ // external key material.
+ // - Destruction of external key material associated with this
+ // [EkmConnection][google.cloud.kms.v1.EkmConnection] can be requested by
+ // calling [DestroyCryptoKeyVersion][EkmService.DestroyCryptoKeyVersion].
+ // - Automatic rotation of key material is supported.
EkmConnection_CLOUD_KMS EkmConnection_KeyManagementMode = 2
)
diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
index e4b300d9d62..3cc89947e81 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.32.0
+// protoc v4.25.2
// source: google/cloud/kms/v1/resources.proto
package kmspb
@@ -933,6 +933,7 @@ type CryptoKey struct {
// Controls the rate of automatic rotation.
//
// Types that are assignable to RotationSchedule:
+ //
// *CryptoKey_RotationPeriod
RotationSchedule isCryptoKey_RotationSchedule `protobuf_oneof:"rotation_schedule"`
// A template describing settings for new
@@ -1484,7 +1485,7 @@ func (x *CryptoKeyVersion) GetReimportEligible() bool {
return false
}
-// The public key for a given
+// The public keys for a given
// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Obtained via
// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey].
type PublicKey struct {
@@ -1492,18 +1493,22 @@ type PublicKey struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The public key, encoded in PEM format. For more information, see the
- // [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for
- // [General Considerations](https://tools.ietf.org/html/rfc7468#section-2) and
- // [Textual Encoding of Subject Public Key Info]
+ // A public key encoded in PEM format, populated only when
+ // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]
+ // returns one key. For more information, see the [RFC
+ // 7468](https://tools.ietf.org/html/rfc7468) sections for [General
+ // Considerations](https://tools.ietf.org/html/rfc7468#section-2) and [Textual
+ // Encoding of Subject Public Key Info]
// (https://tools.ietf.org/html/rfc7468#section-13).
Pem string `protobuf:"bytes,1,opt,name=pem,proto3" json:"pem,omitempty"`
// The
// [Algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm]
// associated with this key.
Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"`
- // Integrity verification field. A CRC32C checksum of the returned
- // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem]. An integrity check of
+ // Integrity verification field: A CRC32C checksum of the returned
+ // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem]. It is only populated
+ // when [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]
+ // returns one key. An integrity check of
// [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem] can be performed by
// computing the CRC32C checksum of
// [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem] and comparing your
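The PublicKey comments above describe verifying the returned PEM against a server-reported CRC32C checksum. A small sketch of that check using Go's hash/crc32; the reported checksum is passed in as a plain int64 because the response field carrying it is not part of this hunk.

package main

import (
	"fmt"
	"hash/crc32"
	"log"
)

// verifyPEMCRC32C recomputes the CRC32C (Castagnoli) checksum of the PEM
// returned by GetPublicKey and compares it with the value reported by the
// server.
func verifyPEMCRC32C(pem string, reported int64) error {
	table := crc32.MakeTable(crc32.Castagnoli)
	if got := int64(crc32.Checksum([]byte(pem), table)); got != reported {
		return fmt.Errorf("public key PEM failed CRC32C check: got %d, want %d", got, reported)
	}
	return nil
}

func main() {
	pem := "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----\n" // placeholder PEM
	// In real use the reported value comes from the GetPublicKey response.
	reported := int64(crc32.Checksum([]byte(pem), crc32.MakeTable(crc32.Castagnoli)))
	if err := verifyPEMCRC32C(pem, reported); err != nil {
		log.Fatal(err)
	}
	fmt.Println("checksum OK")
}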
diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go
index 6ed2a1f89de..3d2fc5b9927 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.32.0
+// protoc v4.25.2
// source: google/cloud/kms/v1/service.proto
package kmspb
@@ -1285,14 +1285,16 @@ type ImportCryptoKeyVersionRequest struct {
//
// this field must contain the concatenation of:
//
- // - An ephemeral AES-256 wrapping key wrapped with the
- // [public_key][google.cloud.kms.v1.ImportJob.public_key] using
- // RSAES-OAEP with SHA-1/SHA-256, MGF1 with SHA-1/SHA-256, and an empty
- // label.
- //
- // - The formatted key to be imported, wrapped with the ephemeral AES-256
- // key using AES-KWP (RFC 5649).
- //
+ //
+ // - An ephemeral AES-256 wrapping key wrapped with the
+ // [public_key][google.cloud.kms.v1.ImportJob.public_key] using
+ // RSAES-OAEP with SHA-1/SHA-256, MGF1 with SHA-1/SHA-256, and an empty
+ // label.
+ //
+ // - The formatted key to be imported, wrapped with the ephemeral AES-256
+ // key using AES-KWP (RFC 5649).
+ //
+ //
//
//
// This format is the same as the format produced by PKCS#11 mechanism
@@ -1312,6 +1314,7 @@ type ImportCryptoKeyVersionRequest struct {
// instead.
//
// Types that are assignable to WrappedKeyMaterial:
+ //
// *ImportCryptoKeyVersionRequest_RsaAesWrappedKey
WrappedKeyMaterial isImportCryptoKeyVersionRequest_WrappedKeyMaterial `protobuf_oneof:"wrapped_key_material"`
}
@@ -4029,6 +4032,7 @@ type Digest struct {
// Required. The message digest.
//
// Types that are assignable to Digest:
+ //
// *Digest_Sha256
// *Digest_Sha384
// *Digest_Sha512
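The Digest comment above lists the assignable oneof wrappers (Digest_Sha256, Digest_Sha384, Digest_Sha512). A brief sketch of selecting the SHA-256 branch for a signing request; the AsymmetricSignRequest type and its field names are assumptions drawn from this package's generated client rather than from this hunk, and the key version name is a placeholder.

package main

import (
	"crypto/sha256"
	"fmt"

	"cloud.google.com/go/kms/apiv1/kmspb"
)

func main() {
	sum := sha256.Sum256([]byte("payload to sign"))

	// One of the assignable wrappers listed above selects the digest branch.
	digest := &kmspb.Digest{
		Digest: &kmspb.Digest_Sha256{Sha256: sum[:]},
	}

	req := &kmspb.AsymmetricSignRequest{
		Name:   "projects/p/locations/l/keyRings/r/cryptoKeys/k/cryptoKeyVersions/1", // placeholder
		Digest: digest,
	}
	fmt.Println(len(req.GetDigest().GetSha256()) == sha256.Size)
}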
diff --git a/vendor/cloud.google.com/go/kms/internal/version.go b/vendor/cloud.google.com/go/kms/internal/version.go
index 30aa7c4bbc2..60412139d26 100644
--- a/vendor/cloud.google.com/go/kms/internal/version.go
+++ b/vendor/cloud.google.com/go/kms/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.15.5"
+const Version = "1.15.7"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
index aa30abf3739..7a0a524e332 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
@@ -1,5 +1,25 @@
# Release History
+## 1.9.2 (2024-02-06)
+
+### Bugs Fixed
+
+* `runtime.MarshalAsByteArray` and `runtime.MarshalAsJSON` will preserve the preexisting value of the `Content-Type` header.
+
+### Other Changes
+
+* Update to latest version of `internal`.
+
+## 1.9.1 (2023-12-11)
+
+### Bugs Fixed
+
+* The `retry-after-ms` and `x-ms-retry-after-ms` headers weren't being checked during retries.
+
+### Other Changes
+
+* Update dependencies.
+
## 1.9.0 (2023-11-06)
### Breaking Changes
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
new file mode 100644
index 00000000000..187fe82b97c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
@@ -0,0 +1,224 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package resource
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ providersKey = "providers"
+ subscriptionsKey = "subscriptions"
+ resourceGroupsLowerKey = "resourcegroups"
+ locationsKey = "locations"
+ builtInResourceNamespace = "Microsoft.Resources"
+)
+
+// RootResourceID defines the tenant as the root parent of all other ResourceID.
+var RootResourceID = &ResourceID{
+ Parent: nil,
+ ResourceType: TenantResourceType,
+ Name: "",
+}
+
+// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`.
+// Don't create this type directly, use ParseResourceID instead.
+type ResourceID struct {
+ // Parent is the parent ResourceID of this instance.
+ // Can be nil if there is no parent.
+ Parent *ResourceID
+
+ // SubscriptionID is the subscription ID in this resource ID.
+ // The value can be empty if the resource ID does not contain a subscription ID.
+ SubscriptionID string
+
+ // ResourceGroupName is the resource group name in this resource ID.
+ // The value can be empty if the resource ID does not contain a resource group name.
+ ResourceGroupName string
+
+ // Provider represents the provider name in this resource ID.
+ // This is only valid when the resource ID represents a resource provider.
+ // Example: `/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Insights`
+ Provider string
+
+ // Location is the location in this resource ID.
+ // The value can be empty if the resource ID does not contain a location name.
+ Location string
+
+ // ResourceType represents the type of this resource ID.
+ ResourceType ResourceType
+
+ // Name is the resource name of this resource ID.
+ Name string
+
+ isChild bool
+ stringValue string
+}
+
+// ParseResourceID parses a string to an instance of ResourceID
+func ParseResourceID(id string) (*ResourceID, error) {
+ if len(id) == 0 {
+ return nil, fmt.Errorf("invalid resource ID: id cannot be empty")
+ }
+
+ if !strings.HasPrefix(id, "/") {
+ return nil, fmt.Errorf("invalid resource ID: resource id '%s' must start with '/'", id)
+ }
+
+ parts := splitStringAndOmitEmpty(id, "/")
+
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ if !strings.EqualFold(parts[0], subscriptionsKey) && !strings.EqualFold(parts[0], providersKey) {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ return appendNext(RootResourceID, parts, id)
+}
+
+// String returns the string of the ResourceID
+func (id *ResourceID) String() string {
+ if len(id.stringValue) > 0 {
+ return id.stringValue
+ }
+
+ if id.Parent == nil {
+ return ""
+ }
+
+ builder := strings.Builder{}
+ builder.WriteString(id.Parent.String())
+
+ if id.isChild {
+ builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType()))
+ if len(id.Name) > 0 {
+ builder.WriteString(fmt.Sprintf("/%s", id.Name))
+ }
+ } else {
+ builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name))
+ }
+
+ id.stringValue = builder.String()
+
+ return id.stringValue
+}
+
+func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID {
+ id := &ResourceID{}
+ id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true)
+ return id
+}
+
+func newResourceIDWithResourceType(parent *ResourceID, resourceType ResourceType, resourceName string) *ResourceID {
+ id := &ResourceID{}
+ id.init(parent, resourceType, resourceName, true)
+ return id
+}
+
+func newResourceIDWithProvider(parent *ResourceID, providerNamespace, resourceTypeName, resourceName string) *ResourceID {
+ id := &ResourceID{}
+ id.init(parent, NewResourceType(providerNamespace, resourceTypeName), resourceName, false)
+ return id
+}
+
+func chooseResourceType(resourceTypeName string, parent *ResourceID) ResourceType {
+ if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) {
+ return ResourceGroupResourceType
+ } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && parent != nil && parent.ResourceType.String() == TenantResourceType.String() {
+ return SubscriptionResourceType
+ }
+
+ return parent.ResourceType.AppendChild(resourceTypeName)
+}
+
+func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name string, isChild bool) {
+ if parent != nil {
+ id.Provider = parent.Provider
+ id.SubscriptionID = parent.SubscriptionID
+ id.ResourceGroupName = parent.ResourceGroupName
+ id.Location = parent.Location
+ }
+
+ if resourceType.String() == SubscriptionResourceType.String() {
+ id.SubscriptionID = name
+ }
+
+ if resourceType.lastType() == locationsKey {
+ id.Location = name
+ }
+
+ if resourceType.String() == ResourceGroupResourceType.String() {
+ id.ResourceGroupName = name
+ }
+
+ if resourceType.String() == ProviderResourceType.String() {
+ id.Provider = name
+ }
+
+ if parent == nil {
+ id.Parent = RootResourceID
+ } else {
+ id.Parent = parent
+ }
+ id.isChild = isChild
+ id.ResourceType = resourceType
+ id.Name = name
+}
+
+func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) {
+ if len(parts) == 0 {
+ return parent, nil
+ }
+
+ if len(parts) == 1 {
+ // subscriptions and resourceGroups are not valid ids without their names
+ if strings.EqualFold(parts[0], subscriptionsKey) || strings.EqualFold(parts[0], resourceGroupsLowerKey) {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ // resourceGroup must contain either child or provider resource type
+ if parent.ResourceType.String() == ResourceGroupResourceType.String() {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ return newResourceID(parent, parts[0], ""), nil
+ }
+
+ if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) {
+ // provider resource can only be on a tenant or a subscription parent
+ if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ return appendNext(newResourceIDWithResourceType(parent, ProviderResourceType, parts[1]), parts[2:], id)
+ }
+
+ if len(parts) > 3 && strings.EqualFold(parts[0], providersKey) {
+ return appendNext(newResourceIDWithProvider(parent, parts[1], parts[2], parts[3]), parts[4:], id)
+ }
+
+ if len(parts) > 1 && !strings.EqualFold(parts[0], providersKey) {
+ return appendNext(newResourceID(parent, parts[0], parts[1]), parts[2:], id)
+ }
+
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+}
+
+func splitStringAndOmitEmpty(v, sep string) []string {
+ r := make([]string, 0)
+ for _, s := range strings.Split(v, sep) {
+ if len(s) == 0 {
+ continue
+ }
+ r = append(r, s)
+ }
+
+ return r
+}
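
The parser above lives in an internal package; application code normally reaches it through the public wrapper in github.com/Azure/azure-sdk-for-go/sdk/azcore/arm, which re-exports the same types in current azcore releases. A minimal sketch of reading the parsed components, assuming that wrapper:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
)

func main() {
	id, err := arm.ParseResourceID(
		"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.KeyVault/vaults/myVault")
	if err != nil {
		panic(err)
	}
	fmt.Println(id.SubscriptionID)         // 00000000-0000-0000-0000-000000000000
	fmt.Println(id.ResourceGroupName)      // myRg
	fmt.Println(id.ResourceType.Namespace) // Microsoft.KeyVault
	fmt.Println(id.ResourceType.Type)      // vaults
	fmt.Println(id.Name)                   // myVault
}
```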
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go
new file mode 100644
index 00000000000..ca03ac9713d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go
@@ -0,0 +1,114 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package resource
+
+import (
+ "fmt"
+ "strings"
+)
+
+// SubscriptionResourceType is the ResourceType of a subscription
+var SubscriptionResourceType = NewResourceType(builtInResourceNamespace, "subscriptions")
+
+// ResourceGroupResourceType is the ResourceType of a resource group
+var ResourceGroupResourceType = NewResourceType(builtInResourceNamespace, "resourceGroups")
+
+// TenantResourceType is the ResourceType of a tenant
+var TenantResourceType = NewResourceType(builtInResourceNamespace, "tenants")
+
+// ProviderResourceType is the ResourceType of a provider
+var ProviderResourceType = NewResourceType(builtInResourceNamespace, "providers")
+
+// ResourceType represents an Azure resource type, e.g. "Microsoft.Network/virtualNetworks/subnets".
+// Don't create this type directly, use ParseResourceType or NewResourceType instead.
+type ResourceType struct {
+ // Namespace is the namespace of the resource type.
+ // e.g. "Microsoft.Network" in resource type "Microsoft.Network/virtualNetworks/subnets"
+ Namespace string
+
+ // Type is the full type name of the resource type.
+ // e.g. "virtualNetworks/subnets" in resource type "Microsoft.Network/virtualNetworks/subnets"
+ Type string
+
+ // Types is the slice of all the sub-types of this resource type.
+ // e.g. ["virtualNetworks", "subnets"] in resource type "Microsoft.Network/virtualNetworks/subnets"
+ Types []string
+
+ stringValue string
+}
+
+// String returns the string of the ResourceType
+func (t ResourceType) String() string {
+ return t.stringValue
+}
+
+// IsParentOf returns true when the receiver is the parent resource type of the child.
+func (t ResourceType) IsParentOf(child ResourceType) bool {
+ if !strings.EqualFold(t.Namespace, child.Namespace) {
+ return false
+ }
+ if len(t.Types) >= len(child.Types) {
+ return false
+ }
+ for i := range t.Types {
+ if !strings.EqualFold(t.Types[i], child.Types[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// AppendChild creates an instance of ResourceType using the receiver as the parent with childType appended to it.
+func (t ResourceType) AppendChild(childType string) ResourceType {
+ return NewResourceType(t.Namespace, fmt.Sprintf("%s/%s", t.Type, childType))
+}
+
+// NewResourceType creates an instance of ResourceType using a provider namespace
+// such as "Microsoft.Network" and type such as "virtualNetworks/subnets".
+func NewResourceType(providerNamespace, typeName string) ResourceType {
+ return ResourceType{
+ Namespace: providerNamespace,
+ Type: typeName,
+ Types: splitStringAndOmitEmpty(typeName, "/"),
+ stringValue: fmt.Sprintf("%s/%s", providerNamespace, typeName),
+ }
+}
+
+// ParseResourceType parses the ResourceType from a resource type string (e.g. Microsoft.Network/virtualNetworks/subnets)
+// or a resource identifier string.
+// (e.g. /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mySubnet)
+func ParseResourceType(resourceIDOrType string) (ResourceType, error) {
+ // split the path into segments
+ parts := splitStringAndOmitEmpty(resourceIDOrType, "/")
+
+ // There must be at least a namespace and type name
+ if len(parts) < 1 {
+ return ResourceType{}, fmt.Errorf("invalid resource ID or type: %s", resourceIDOrType)
+ }
+
+ // if the type is just subscriptions, it is a built-in type in the Microsoft.Resources namespace
+ if len(parts) == 1 {
+ // Simple resource type
+ return NewResourceType(builtInResourceNamespace, parts[0]), nil
+ } else if strings.Contains(parts[0], ".") {
+ // Handle resource types (Microsoft.Compute/virtualMachines, Microsoft.Network/virtualNetworks/subnets)
+ // it is a full type name
+ return NewResourceType(parts[0], strings.Join(parts[1:], "/")), nil
+ } else {
+ // Check if ResourceID
+ id, err := ParseResourceID(resourceIDOrType)
+ if err != nil {
+ return ResourceType{}, err
+ }
+ return NewResourceType(id.ResourceType.Namespace, id.ResourceType.Type), nil
+ }
+}
+
+func (t ResourceType) lastType() string {
+ return t.Types[len(t.Types)-1]
+}
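
Similarly, a short sketch of the ResourceType helpers via the public arm package, again assuming it re-exports these helpers as in current azcore releases:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
)

func main() {
	subnets, err := arm.ParseResourceType("Microsoft.Network/virtualNetworks/subnets")
	if err != nil {
		panic(err)
	}
	vnets := arm.NewResourceType("Microsoft.Network", "virtualNetworks")
	fmt.Println(subnets.Namespace, subnets.Type) // Microsoft.Network virtualNetworks/subnets
	fmt.Println(vnets.IsParentOf(subnets))       // true
}
```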
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
new file mode 100644
index 00000000000..83cf91e3ecb
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
@@ -0,0 +1,98 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package policy
+
+import (
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+// BearerTokenOptions configures the bearer token policy's behavior.
+type BearerTokenOptions struct {
+ // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests.
+ // The policy will add a token from each of these tenants to every request. The
+ // authenticating user or service principal must be a guest in these tenants, and the
+ // policy's credential must support multitenant authentication.
+ AuxiliaryTenants []string
+
+ // Scopes contains the list of permission scopes required for the token.
+ Scopes []string
+}
+
+// RegistrationOptions configures the registration policy's behavior.
+// All zero-value fields will be initialized with their default values.
+type RegistrationOptions struct {
+ policy.ClientOptions
+
+ // MaxAttempts is the total number of times to attempt automatic registration
+ // in the event that an attempt fails.
+ // The default value is 3.
+ // Set to a value less than zero to disable the policy.
+ MaxAttempts int
+
+ // PollingDelay is the amount of time to sleep between polling intervals.
+ // The default value is 15 seconds.
+ // A value less than zero means no delay between polling intervals (not recommended).
+ PollingDelay time.Duration
+
+ // PollingDuration is the amount of time to wait before abandoning polling.
+ // The default value is 5 minutes.
+ // NOTE: Setting this to a small value might cause the policy to prematurely fail.
+ PollingDuration time.Duration
+}
+
+// ClientOptions contains configuration settings for a client's pipeline.
+type ClientOptions struct {
+ policy.ClientOptions
+
+ // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests.
+ // The client will add a token from each of these tenants to every request. The
+ // authenticating user or service principal must be a guest in these tenants, and the
+ // client's credential must support multitenant authentication.
+ AuxiliaryTenants []string
+
+ // DisableRPRegistration disables the auto-RP registration policy. Defaults to false.
+ DisableRPRegistration bool
+}
+
+// Clone returns a deep copy of the current options.
+func (o *ClientOptions) Clone() *ClientOptions {
+ if o == nil {
+ return nil
+ }
+ copiedOptions := *o
+ copiedOptions.Cloud.Services = copyMap(copiedOptions.Cloud.Services)
+ copiedOptions.Logging.AllowedHeaders = copyArray(copiedOptions.Logging.AllowedHeaders)
+ copiedOptions.Logging.AllowedQueryParams = copyArray(copiedOptions.Logging.AllowedQueryParams)
+ copiedOptions.Retry.StatusCodes = copyArray(copiedOptions.Retry.StatusCodes)
+ copiedOptions.PerRetryPolicies = copyArray(copiedOptions.PerRetryPolicies)
+ copiedOptions.PerCallPolicies = copyArray(copiedOptions.PerCallPolicies)
+ return &copiedOptions
+}
+
+// copyMap returns a new map with all the key/value pairs in the src map
+func copyMap[K comparable, V any](src map[K]V) map[K]V {
+ if src == nil {
+ return nil
+ }
+ copiedMap := make(map[K]V)
+ for k, v := range src {
+ copiedMap[k] = v
+ }
+ return copiedMap
+}
+
+// copyArray returns a new array with all the elements in the src array
+func copyArray[T any](src []T) []T {
+ if src == nil {
+ return nil
+ }
+ copiedArray := make([]T, len(src))
+ copy(copiedArray, src)
+ return copiedArray
+}
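
A sketch of how a caller might fill in these option types; the tenant ID and tuning values below are illustrative, not defaults:

```go
package main

import (
	"fmt"
	"time"

	armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
)

func main() {
	// Client options for a cross-tenant call with automatic RP registration disabled.
	clientOpts := armpolicy.ClientOptions{
		AuxiliaryTenants:      []string{"auxiliary-tenant-id"}, // illustrative placeholder
		DisableRPRegistration: true,
	}

	// Registration options tuned down from the defaults (3 attempts / 15s delay / 5min duration).
	regOpts := armpolicy.RegistrationOptions{
		MaxAttempts:     2,
		PollingDelay:    5 * time.Second,
		PollingDuration: 2 * time.Minute,
	}

	fmt.Println(clientOpts.DisableRPRegistration, regOpts.MaxAttempts)
}
```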
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
new file mode 100644
index 00000000000..302c19cd426
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
@@ -0,0 +1,65 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
+// NewPipeline creates a pipeline from connection options. Policies from ClientOptions are
+// placed after policies from PipelineOptions. The telemetry policy, when enabled, will
+// use the specified module and version info.
+func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azruntime.PipelineOptions, options *armpolicy.ClientOptions) (azruntime.Pipeline, error) {
+ if options == nil {
+ options = &armpolicy.ClientOptions{}
+ }
+ conf, err := getConfiguration(&options.ClientOptions)
+ if err != nil {
+ return azruntime.Pipeline{}, err
+ }
+ authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{
+ AuxiliaryTenants: options.AuxiliaryTenants,
+ Scopes: []string{conf.Audience + "/.default"},
+ })
+ perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1)
+ copy(perRetry, plOpts.PerRetry)
+ plOpts.PerRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy))
+ if !options.DisableRPRegistration {
+ regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions}
+ regPolicy, err := NewRPRegistrationPolicy(cred, &regRPOpts)
+ if err != nil {
+ return azruntime.Pipeline{}, err
+ }
+ perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1)
+ copy(perCall, plOpts.PerCall)
+ plOpts.PerCall = append(perCall, regPolicy)
+ }
+ if plOpts.APIVersion.Name == "" {
+ plOpts.APIVersion.Name = "api-version"
+ }
+ return azruntime.NewPipeline(module, version, plOpts, &options.ClientOptions), nil
+}
+
+func getConfiguration(o *azpolicy.ClientOptions) (cloud.ServiceConfiguration, error) {
+ c := cloud.AzurePublic
+ if !reflect.ValueOf(o.Cloud).IsZero() {
+ c = o.Cloud
+ }
+ if conf, ok := c.Services[cloud.ResourceManager]; ok && conf.Endpoint != "" && conf.Audience != "" {
+ return conf, nil
+ } else {
+ return conf, errors.New("provided Cloud field is missing Azure Resource Manager configuration")
+ }
+}
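
A sketch of how an ARM client module could build a pipeline with this function; the module name, version, and request URL are illustrative, and the credential comes from azidentity. The bearer-token, tracing, and (unless disabled) RP-registration policies are added by NewPipeline itself:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
	armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
	azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	pl, err := armruntime.NewPipeline("example-module", "v0.1.0", cred, azruntime.PipelineOptions{}, &armpolicy.ClientOptions{})
	if err != nil {
		panic(err)
	}
	// Issue a request through the pipeline (URL is illustrative).
	req, err := azruntime.NewRequest(context.Background(), http.MethodGet,
		"https://management.azure.com/subscriptions?api-version=2022-12-01")
	if err != nil {
		panic(err)
	}
	resp, err := pl.Do(req)
	fmt.Println(resp, err)
}
```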
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
new file mode 100644
index 00000000000..54b3bb78d85
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
@@ -0,0 +1,145 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
+)
+
+const headerAuxiliaryAuthorization = "x-ms-authorization-auxiliary"
+
+// acquiringResourceState holds data for an auxiliary token request
+type acquiringResourceState struct {
+ ctx context.Context
+ p *BearerTokenPolicy
+ tenant string
+}
+
+// acquireAuxToken acquires a token from an auxiliary tenant. Only one thread/goroutine at a time ever calls this function.
+func acquireAuxToken(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) {
+ tk, err := state.p.cred.GetToken(state.ctx, azpolicy.TokenRequestOptions{
+ EnableCAE: true,
+ Scopes: state.p.scopes,
+ TenantID: state.tenant,
+ })
+ if err != nil {
+ return azcore.AccessToken{}, time.Time{}, err
+ }
+ return tk, tk.ExpiresOn, nil
+}
+
+// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
+type BearerTokenPolicy struct {
+ auxResources map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState]
+ btp *azruntime.BearerTokenPolicy
+ cred azcore.TokenCredential
+ scopes []string
+}
+
+// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
+// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
+// opts: optional settings. Pass nil to accept default values; this is the same as passing zero-value options.
+func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTokenOptions) *BearerTokenPolicy {
+ if opts == nil {
+ opts = &armpolicy.BearerTokenOptions{}
+ }
+ p := &BearerTokenPolicy{cred: cred}
+ p.auxResources = make(map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState], len(opts.AuxiliaryTenants))
+ for _, t := range opts.AuxiliaryTenants {
+ p.auxResources[t] = temporal.NewResource(acquireAuxToken)
+ }
+ p.scopes = make([]string, len(opts.Scopes))
+ copy(p.scopes, opts.Scopes)
+ p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{
+ AuthorizationHandler: azpolicy.AuthorizationHandler{
+ OnChallenge: p.onChallenge,
+ OnRequest: p.onRequest,
+ },
+ })
+ return p
+}
+
+func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error {
+ challenge := res.Header.Get(shared.HeaderWWWAuthenticate)
+ claims, err := parseChallenge(challenge)
+ if err != nil {
+ // the challenge contains claims we can't parse
+ return err
+ } else if claims != "" {
+ // request a new token having the specified claims, send the request again
+ return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes})
+ }
+ // auth challenge didn't include claims, so this is a simple authorization failure
+ return azruntime.NewResponseError(res)
+}
+
+// onRequest authorizes requests with one or more bearer tokens
+func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error {
+ // authorize the request with a token for the primary tenant
+ err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes})
+ if err != nil || len(b.auxResources) == 0 {
+ return err
+ }
+ // add tokens for auxiliary tenants
+ as := acquiringResourceState{
+ ctx: req.Raw().Context(),
+ p: b,
+ }
+ auxTokens := make([]string, 0, len(b.auxResources))
+ for tenant, er := range b.auxResources {
+ as.tenant = tenant
+ auxTk, err := er.Get(as)
+ if err != nil {
+ return err
+ }
+ auxTokens = append(auxTokens, fmt.Sprintf("%s%s", shared.BearerTokenPrefix, auxTk.Token))
+ }
+ req.Raw().Header.Set(headerAuxiliaryAuthorization, strings.Join(auxTokens, ", "))
+ return nil
+}
+
+// Do authorizes a request with a bearer token
+func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) {
+ return b.btp.Do(req)
+}
+
+// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token
+// that will satisfy conditional access policies. It returns a non-nil error when the given value contains
+// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error.
+func parseChallenge(wwwAuthenticate string) (string, error) {
+ claims := ""
+ var err error
+ for _, param := range strings.Split(wwwAuthenticate, ",") {
+ if _, after, found := strings.Cut(param, "claims="); found {
+ if claims != "" {
+ // The header contains multiple challenges, at least two of which specify claims. The specs allow this
+ // but it's unclear what a client should do in this case and there's as yet no concrete example of it.
+ err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate)
+ break
+ }
+ // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded
+ claims = strings.Trim(after, `\"=`)
+ // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42"
+ if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil {
+ claims = string(b)
+ } else {
+ err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate)
+ break
+ }
+ }
+ }
+ return claims, err
+}
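
A self-contained sketch of the claims extraction parseChallenge performs on a CAE challenge; the header value below is fabricated for illustration:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// claimsFromChallenge extracts and decodes the base64url-encoded claims value
// from a WWW-Authenticate header, mirroring the approach used above.
func claimsFromChallenge(wwwAuthenticate string) string {
	for _, param := range strings.Split(wwwAuthenticate, ",") {
		if _, after, found := strings.Cut(param, "claims="); found {
			// claims may or may not be quoted and/or padded
			raw := strings.Trim(after, `"=`)
			if b, err := base64.RawURLEncoding.DecodeString(raw); err == nil {
				return string(b)
			}
		}
	}
	return ""
}

func main() {
	encoded := base64.RawURLEncoding.EncodeToString([]byte(`{"access_token":{"nbf":{"essential":true}}}`))
	header := fmt.Sprintf(`Bearer realm="", error="insufficient_claims", claims="%s"`, encoded)
	fmt.Println(claimsFromChallenge(header))
}
```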
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
new file mode 100644
index 00000000000..83e15949aa3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
@@ -0,0 +1,347 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+const (
+ // LogRPRegistration entries contain information specific to the automatic registration of an RP.
+ // Entries of this classification are written IFF the policy needs to take any action.
+ LogRPRegistration log.Event = "RPRegistration"
+)
+
+// setDefaults sets any default values
+func setDefaults(r *armpolicy.RegistrationOptions) {
+ if r.MaxAttempts == 0 {
+ r.MaxAttempts = 3
+ } else if r.MaxAttempts < 0 {
+ r.MaxAttempts = 0
+ }
+ if r.PollingDelay == 0 {
+ r.PollingDelay = 15 * time.Second
+ } else if r.PollingDelay < 0 {
+ r.PollingDelay = 0
+ }
+ if r.PollingDuration == 0 {
+ r.PollingDuration = 5 * time.Minute
+ }
+}
+
+// NewRPRegistrationPolicy creates a policy object configured using the specified options.
+// The policy controls whether an unregistered resource provider should automatically be
+// registered. See https://aka.ms/rps-not-found for more information.
+func NewRPRegistrationPolicy(cred azcore.TokenCredential, o *armpolicy.RegistrationOptions) (azpolicy.Policy, error) {
+ if o == nil {
+ o = &armpolicy.RegistrationOptions{}
+ }
+ conf, err := getConfiguration(&o.ClientOptions)
+ if err != nil {
+ return nil, err
+ }
+ authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{Scopes: []string{conf.Audience + "/.default"}})
+ p := &rpRegistrationPolicy{
+ endpoint: conf.Endpoint,
+ pipeline: runtime.NewPipeline(shared.Module, shared.Version, runtime.PipelineOptions{PerRetry: []azpolicy.Policy{authPolicy}}, &o.ClientOptions),
+ options: *o,
+ }
+ // init the copy
+ setDefaults(&p.options)
+ return p, nil
+}
+
+type rpRegistrationPolicy struct {
+ endpoint string
+ pipeline runtime.Pipeline
+ options armpolicy.RegistrationOptions
+}
+
+func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) {
+ if r.options.MaxAttempts == 0 {
+ // policy is disabled
+ return req.Next()
+ }
+ const registeredState = "Registered"
+ var rp string
+ var resp *http.Response
+ for attempts := 0; attempts < r.options.MaxAttempts; attempts++ {
+ var err error
+ // make the original request
+ resp, err = req.Next()
+ // getting a 409 is the first indication that the RP might need to be registered, check error response
+ if err != nil || resp.StatusCode != http.StatusConflict {
+ return resp, err
+ }
+ var reqErr requestError
+ if err = runtime.UnmarshalAsJSON(resp, &reqErr); err != nil {
+ return resp, err
+ }
+ if reqErr.ServiceError == nil {
+ // missing service error info. just return the response
+ // to the caller so its error unmarshalling will kick in
+ return resp, err
+ }
+ if !isUnregisteredRPCode(reqErr.ServiceError.Code) {
+ // not a 409 due to unregistered RP. just return the response
+ // to the caller so its error unmarshalling will kick in
+ return resp, err
+ }
+ // RP needs to be registered. start by getting the subscription ID from the original request
+ subID, err := getSubscription(req.Raw().URL.Path)
+ if err != nil {
+ return resp, err
+ }
+ // now get the RP from the error
+ rp, err = getProvider(reqErr)
+ if err != nil {
+ return resp, err
+ }
+ logRegistrationExit := func(v interface{}) {
+ log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v)
+ }
+ log.Writef(LogRPRegistration, "BEGIN registration for %s", rp)
+ // create client and make the registration request
+ // we use the scheme and host from the original request
+ rpOps := &providersOperations{
+ p: r.pipeline,
+ u: r.endpoint,
+ subID: subID,
+ }
+ if _, err = rpOps.Register(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, rp); err != nil {
+ logRegistrationExit(err)
+ return resp, err
+ }
+
+ // RP was registered, however we need to wait for the registration to complete
+ pollCtx, pollCancel := context.WithTimeout(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, r.options.PollingDuration)
+ var lastRegState string
+ for {
+ // get the current registration state
+ getResp, err := rpOps.Get(pollCtx, rp)
+ if err != nil {
+ pollCancel()
+ logRegistrationExit(err)
+ return resp, err
+ }
+ if getResp.Provider.RegistrationState != nil && !strings.EqualFold(*getResp.Provider.RegistrationState, lastRegState) {
+ // registration state has changed, or was updated for the first time
+ lastRegState = *getResp.Provider.RegistrationState
+ log.Writef(LogRPRegistration, "registration state is %s", lastRegState)
+ }
+ if strings.EqualFold(lastRegState, registeredState) {
+ // registration complete
+ pollCancel()
+ logRegistrationExit(lastRegState)
+ break
+ }
+ // wait before trying again
+ select {
+ case <-time.After(r.options.PollingDelay):
+ // continue polling
+ case <-pollCtx.Done():
+ pollCancel()
+ logRegistrationExit(pollCtx.Err())
+ return resp, pollCtx.Err()
+ }
+ }
+ // RP was successfully registered, retry the original request
+ err = req.RewindBody()
+ if err != nil {
+ return resp, err
+ }
+ }
+ // if we get here it means we exceeded the number of attempts
+ return resp, fmt.Errorf("exceeded attempts to register %s", rp)
+}
+
+var unregisteredRPCodes = []string{
+ "MissingSubscriptionRegistration",
+ "MissingRegistrationForResourceProvider",
+ "Subscription Not Registered",
+ "SubscriptionNotRegistered",
+}
+
+func isUnregisteredRPCode(errorCode string) bool {
+ for _, code := range unregisteredRPCodes {
+ if strings.EqualFold(errorCode, code) {
+ return true
+ }
+ }
+ return false
+}
+
+func getSubscription(path string) (string, error) {
+ parts := strings.Split(path, "/")
+ for i, v := range parts {
+ if v == "subscriptions" && (i+1) < len(parts) {
+ return parts[i+1], nil
+ }
+ }
+ return "", fmt.Errorf("failed to obtain subscription ID from %s", path)
+}
+
+func getProvider(re requestError) (string, error) {
+ if len(re.ServiceError.Details) > 0 {
+ return re.ServiceError.Details[0].Target, nil
+ }
+ return "", errors.New("unexpected empty Details")
+}
+
+// minimal error definitions to simplify detection
+type requestError struct {
+ ServiceError *serviceError `json:"error"`
+}
+
+type serviceError struct {
+ Code string `json:"code"`
+ Details []serviceErrorDetails `json:"details"`
+}
+
+type serviceErrorDetails struct {
+ Code string `json:"code"`
+ Target string `json:"target"`
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+// the following code was copied from module armresources, providers.go and models.go
+// only the minimum amount of code was copied to get this working and some edits were made.
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+type providersOperations struct {
+ p runtime.Pipeline
+ u string
+ subID string
+}
+
+// Get - Gets the specified resource provider.
+func (client *providersOperations) Get(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) {
+ req, err := client.getCreateRequest(ctx, resourceProviderNamespace)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ resp, err := client.p.Do(req)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ result, err := client.getHandleResponse(resp)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ return result, nil
+}
+
+// getCreateRequest creates the Get request.
+func (client *providersOperations) getCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"
+ urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace))
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID))
+ req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.u, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ query := req.Raw().URL.Query()
+ query.Set("api-version", "2019-05-01")
+ req.Raw().URL.RawQuery = query.Encode()
+ return req, nil
+}
+
+// getHandleResponse handles the Get response.
+func (client *providersOperations) getHandleResponse(resp *http.Response) (providerResponse, error) {
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return providerResponse{}, exported.NewResponseError(resp)
+ }
+ result := providerResponse{RawResponse: resp}
+ err := runtime.UnmarshalAsJSON(resp, &result.Provider)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ return result, err
+}
+
+// Register - Registers a subscription with a resource provider.
+func (client *providersOperations) Register(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) {
+ req, err := client.registerCreateRequest(ctx, resourceProviderNamespace)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ resp, err := client.p.Do(req)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ result, err := client.registerHandleResponse(resp)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ return result, nil
+}
+
+// registerCreateRequest creates the Register request.
+func (client *providersOperations) registerCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"
+ urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace))
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID))
+ req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.u, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ query := req.Raw().URL.Query()
+ query.Set("api-version", "2019-05-01")
+ req.Raw().URL.RawQuery = query.Encode()
+ return req, nil
+}
+
+// registerHandleResponse handles the Register response.
+func (client *providersOperations) registerHandleResponse(resp *http.Response) (providerResponse, error) {
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return providerResponse{}, exported.NewResponseError(resp)
+ }
+ result := providerResponse{RawResponse: resp}
+ err := runtime.UnmarshalAsJSON(resp, &result.Provider)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ return result, err
+}
+
+// providerResponse is the response envelope for operations that return a Provider type.
+type providerResponse struct {
+ // Resource provider information.
+ Provider *provider
+
+ // RawResponse contains the underlying HTTP response.
+ RawResponse *http.Response
+}
+
+// Provider - Resource provider information.
+type provider struct {
+ // The provider ID.
+ ID *string `json:"id,omitempty"`
+
+ // The namespace of the resource provider.
+ Namespace *string `json:"namespace,omitempty"`
+
+ // The registration policy of the resource provider.
+ RegistrationPolicy *string `json:"registrationPolicy,omitempty"`
+
+ // The registration state of the resource provider.
+ RegistrationState *string `json:"registrationState,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go
new file mode 100644
index 00000000000..6cea184240f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go
@@ -0,0 +1,30 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
+)
+
+// httpTraceNamespacePolicy is a policy that adds the az.namespace attribute to the current Span
+func httpTraceNamespacePolicy(req *policy.Request) (resp *http.Response, err error) {
+ rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{})
+ if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() {
+ rt, err := resource.ParseResourceType(req.Raw().URL.Path)
+ if err == nil {
+ // add the namespace attribute to the current span
+ span := tracer.SpanFromContext(req.Raw().Context())
+ span.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: rt.Namespace})
+ }
+ }
+ return req.Next()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go
new file mode 100644
index 00000000000..1400d43799f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go
@@ -0,0 +1,24 @@
+//go:build go1.16
+// +build go1.16
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+
+func init() {
+ cloud.AzureChina.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
+ Audience: "https://management.core.chinacloudapi.cn",
+ Endpoint: "https://management.chinacloudapi.cn",
+ }
+ cloud.AzureGovernment.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
+ Audience: "https://management.core.usgovcloudapi.net",
+ Endpoint: "https://management.usgovcloudapi.net",
+ }
+ cloud.AzurePublic.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
+ Audience: "https://management.core.windows.net/",
+ Endpoint: "https://management.azure.com",
+ }
+}
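
A sketch of selecting one of these clouds, or describing a private cloud, through client options. getConfiguration above requires both Endpoint and Audience for cloud.ResourceManager; the private-cloud URLs here are placeholders:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func main() {
	// Target Azure Government instead of the default public cloud.
	govOpts := policy.ClientOptions{Cloud: cloud.AzureGovernment}

	// Describe a private/disconnected cloud with its own Resource Manager configuration.
	private := cloud.Configuration{
		ActiveDirectoryAuthorityHost: "https://login.contoso.example", // placeholder
		Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
			cloud.ResourceManager: {
				Audience: "https://management.contoso.example", // placeholder
				Endpoint: "https://management.contoso.example", // placeholder
			},
		},
	}
	privateOpts := policy.ClientOptions{Cloud: private}

	fmt.Println(govOpts.Cloud.Services[cloud.ResourceManager].Endpoint, privateOpts.Cloud.ActiveDirectoryAuthorityHost)
}
```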
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
index 659f2a7d2ea..8d1ae213c95 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
@@ -125,46 +125,11 @@ func (req *Request) OperationValue(value interface{}) bool {
// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length
// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "",
-// Content-Type won't be set.
+// Content-Type won't be set, and if it was set, will be deleted.
// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser.
func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
- var err error
- var size int64
- if body != nil {
- size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
- if err != nil {
- return err
- }
- }
- if size == 0 {
- // treat an empty stream the same as a nil one: assign req a nil body
- body = nil
- // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content
- // (Del is a no-op when the header has no value)
- req.req.Header.Del(shared.HeaderContentLength)
- } else {
- _, err = body.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
- req.Raw().GetBody = func() (io.ReadCloser, error) {
- _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
- return body, err
- }
- }
- // keep a copy of the body argument. this is to handle cases
- // where req.Body is replaced, e.g. httputil.DumpRequest and friends.
- req.body = body
- req.req.Body = body
- req.req.ContentLength = size
- if contentType == "" {
- // Del is a no-op when the header has no value
- req.req.Header.Del(shared.HeaderContentType)
- } else {
- req.req.Header.Set(shared.HeaderContentType, contentType)
- }
- return nil
+ // clobber the existing Content-Type to preserve behavior
+ return SetBody(req, body, contentType, true)
}
// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
@@ -211,3 +176,48 @@ type PolicyFunc func(*Request) (*http.Response, error)
func (pf PolicyFunc) Do(req *Request) (*http.Response, error) {
return pf(req)
}
+
+// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length accordingly.
+// - req is the request to modify
+// - body is the request body; if nil or empty, Content-Length won't be set
+// - contentType is the value for the Content-Type header; if empty, Content-Type will be deleted
+// - clobberContentType when true, will overwrite the existing value of Content-Type with contentType
+func SetBody(req *Request, body io.ReadSeekCloser, contentType string, clobberContentType bool) error {
+ var err error
+ var size int64
+ if body != nil {
+ size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
+ if err != nil {
+ return err
+ }
+ }
+ if size == 0 {
+ // treat an empty stream the same as a nil one: assign req a nil body
+ body = nil
+ // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content
+ // (Del is a no-op when the header has no value)
+ req.req.Header.Del(shared.HeaderContentLength)
+ } else {
+ _, err = body.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
+ req.Raw().GetBody = func() (io.ReadCloser, error) {
+ _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
+ return body, err
+ }
+ }
+ // keep a copy of the body argument. this is to handle cases
+ // where req.Body is replaced, e.g. httputil.DumpRequest and friends.
+ req.body = body
+ req.req.Body = body
+ req.req.ContentLength = size
+ if contentType == "" {
+ // Del is a no-op when the header has no value
+ req.req.Header.Del(shared.HeaderContentType)
+ } else if req.req.Header.Get(shared.HeaderContentType) == "" || clobberContentType {
+ req.req.Header.Set(shared.HeaderContentType, contentType)
+ }
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
index 272f06155ea..8f749f48d9b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -22,11 +22,13 @@ const (
HeaderLocation = "Location"
HeaderOperationLocation = "Operation-Location"
HeaderRetryAfter = "Retry-After"
+ HeaderRetryAfterMS = "Retry-After-Ms"
HeaderUserAgent = "User-Agent"
HeaderWWWAuthenticate = "WWW-Authenticate"
HeaderXMSClientRequestID = "x-ms-client-request-id"
HeaderXMSRequestID = "x-ms-request-id"
HeaderXMSErrorCode = "x-ms-error-code"
+ HeaderXMSRetryAfterMS = "x-ms-retry-after-ms"
)
const BearerTokenPrefix = "Bearer "
@@ -38,5 +40,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
- Version = "v1.9.0"
+ Version = "v1.9.2"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
index 16bc105f481..d3da2c5fdfa 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
@@ -44,22 +44,64 @@ func Delay(ctx context.Context, delay time.Duration) error {
}
}
-// RetryAfter returns non-zero if the response contains a Retry-After header value.
+// RetryAfter returns non-zero if the response contains one of the headers with a "retry after" value.
+// Headers are checked in the following order: retry-after-ms, x-ms-retry-after-ms, retry-after
func RetryAfter(resp *http.Response) time.Duration {
if resp == nil {
return 0
}
- ra := resp.Header.Get(HeaderRetryAfter)
- if ra == "" {
- return 0
+
+ type retryData struct {
+ header string
+ units time.Duration
+
+ // custom is used when the regular algorithm failed and is optional.
+ // the returned duration is used verbatim (units is not applied).
+ custom func(string) time.Duration
}
- // retry-after values are expressed in either number of
- // seconds or an HTTP-date indicating when to try again
- if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
- return time.Duration(retryAfter) * time.Second
- } else if t, err := time.Parse(time.RFC1123, ra); err == nil {
- return time.Until(t)
+
+ nop := func(string) time.Duration { return 0 }
+
+ // the headers are listed in order of preference
+ retries := []retryData{
+ {
+ header: HeaderRetryAfterMS,
+ units: time.Millisecond,
+ custom: nop,
+ },
+ {
+ header: HeaderXMSRetryAfterMS,
+ units: time.Millisecond,
+ custom: nop,
+ },
+ {
+ header: HeaderRetryAfter,
+ units: time.Second,
+
+ // retry-after values are expressed in either number of
+ // seconds or an HTTP-date indicating when to try again
+ custom: func(ra string) time.Duration {
+ t, err := time.Parse(time.RFC1123, ra)
+ if err != nil {
+ return 0
+ }
+ return time.Until(t)
+ },
+ },
}
+
+ for _, retry := range retries {
+ v := resp.Header.Get(retry.header)
+ if v == "" {
+ continue
+ }
+ if retryAfter, _ := strconv.Atoi(v); retryAfter > 0 {
+ return time.Duration(retryAfter) * retry.units
+ } else if d := retry.custom(v); d > 0 {
+ return d
+ }
+ }
+
return 0
}
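
A self-contained sketch of the header precedence described above; the millisecond headers win over Retry-After, which may carry either seconds or an HTTP-date:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryAfter mirrors the precedence described above: retry-after-ms and
// x-ms-retry-after-ms are milliseconds, Retry-After is seconds or an HTTP-date.
func retryAfter(h http.Header) time.Duration {
	checks := []struct {
		name string
		unit time.Duration
	}{
		{"Retry-After-Ms", time.Millisecond},
		{"x-ms-retry-after-ms", time.Millisecond},
		{"Retry-After", time.Second},
	}
	for _, c := range checks {
		v := h.Get(c.name)
		if v == "" {
			continue
		}
		if n, _ := strconv.Atoi(v); n > 0 {
			return time.Duration(n) * c.unit
		}
		// only Retry-After may carry an HTTP-date instead of an integer
		if c.name == "Retry-After" {
			if t, err := time.Parse(time.RFC1123, v); err == nil {
				return time.Until(t)
			}
		}
	}
	return 0
}

func main() {
	h := http.Header{}
	h.Set("Retry-After", "30")
	h.Set("x-ms-retry-after-ms", "1500")
	fmt.Println(retryAfter(h)) // 1.5s — the millisecond header takes precedence
}
```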
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
index e97223da29e..5d1569c8dd2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
@@ -97,7 +97,8 @@ func EncodeByteArray(v []byte, format Base64Encoding) string {
func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error {
// send as a JSON string
encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format))
- return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON)
+ // tsp generated code can set Content-Type so we must prefer that
+ return exported.SetBody(req, exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON, false)
}
// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody.
@@ -106,7 +107,8 @@ func MarshalAsJSON(req *policy.Request, v interface{}) error {
if err != nil {
return fmt.Errorf("error marshalling type %T: %s", v, err)
}
- return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON)
+ // tsp generated code can set Content-Type so we must prefer that
+ return exported.SetBody(req, exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON, false)
}
// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody.
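
A sketch of the resulting behavior: a caller (for example TypeSpec-generated code) that sets Content-Type before marshalling now keeps that value rather than having it overwritten with application/json. The endpoint below is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func main() {
	req, err := runtime.NewRequest(context.Background(), http.MethodPatch, "https://example.com/resource")
	if err != nil {
		panic(err)
	}
	// Preexisting Content-Type set by the caller...
	req.Raw().Header.Set("Content-Type", "application/merge-patch+json")
	// ...is preserved by MarshalAsJSON instead of being clobbered.
	if err := runtime.MarshalAsJSON(req, map[string]string{"name": "example"}); err != nil {
		panic(err)
	}
	fmt.Println(req.Raw().Header.Get("Content-Type")) // application/merge-patch+json
}
```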
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
index 7ea119ab30d..71dcb5f3e95 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -1,5 +1,38 @@
# Release History
+## 1.5.1 (2024-01-17)
+
+### Bugs Fixed
+* `InteractiveBrowserCredential` handles `AdditionallyAllowedTenants` correctly
+
+## 1.5.0 (2024-01-16)
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.5.0-beta.1
+* Removed persistent token caching. It will return in v1.6.0-beta.1
+
+### Bugs Fixed
+* Credentials now preserve MSAL headers e.g. X-Client-Sku
+
+### Other Changes
+* Upgraded dependencies
+
+## 1.5.0-beta.2 (2023-11-07)
+
+### Features Added
+* `DefaultAzureCredential` and `ManagedIdentityCredential` support Azure ML managed identity
+* Added spans for distributed tracing.
+
+## 1.5.0-beta.1 (2023-10-10)
+
+### Features Added
+* Optional persistent token caching for most credentials. Set `TokenCachePersistenceOptions`
+ on a credential's options to enable and configure this. See the package documentation for
+ this version and [TOKEN_CACHING.md](https://aka.ms/azsdk/go/identity/caching) for more
+ details.
+* `AzureDeveloperCLICredential` authenticates with the Azure Developer CLI (`azd`). This
+ credential is also part of the `DefaultAzureCredential` authentication flow.
+
## 1.4.0 (2023-10-10)
### Bugs Fixed
@@ -94,14 +127,14 @@
### Features Added
* By default, credentials set client capability "CP1" to enable support for
[Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation).
- This indicates to Azure Active Directory that your application can handle CAE claims challenges.
+ This indicates to Microsoft Entra ID that your application can handle CAE claims challenges.
You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true".
* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login
prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599))
* Service principal and user credentials support ADFS authentication on Azure Stack.
Specify "adfs" as the credential's tenant.
* Applications running in private or disconnected clouds can prevent credentials from
- requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery`
+ requesting Microsoft Entra instance metadata by setting the `DisableInstanceDiscovery`
field on credential options.
* Many credentials can now be configured to authenticate in multiple tenants. The
options types for these credentials have an `AdditionallyAllowedTenants` field
@@ -454,4 +487,4 @@
## 0.1.0 (2020-07-23)
### Features Added
-* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK.
+* Initial Release. Azure Identity library that provides Microsoft Entra token authentication support for the SDK.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
index 4ac53eb7b27..1a649202303 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
@@ -1,6 +1,6 @@
# Migrating from autorest/adal to azidentity
-`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead.
+`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead.
This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`.
@@ -18,7 +18,7 @@ This guide shows common authentication code using `autorest/adal` and its equiva
### `autorest/adal`
-Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant:
+Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires a Microsoft Entra endpoint and tenant:
```go
import "github.com/Azure/go-autorest/autorest/adal"
@@ -284,7 +284,7 @@ if err == nil {
}
```
-Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent).
+Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview).
## Use azidentity credentials with older packages
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
index da0baa9add3..b6ad2d39f84 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
@@ -1,9 +1,9 @@
# Azure Identity Client Module for Go
-The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
+The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity)
-| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/)
+| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/)
| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity)
# Getting started
@@ -35,6 +35,12 @@ signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in t
When no default browser is available, `az login` will use the device code
authentication flow. This can also be selected manually by running `az login --use-device-code`.
+#### Authenticate via the Azure Developer CLI
+
+Developers coding outside of an IDE can also use the [Azure Developer CLI](https://aka.ms/azure-dev) to authenticate. Applications using the `DefaultAzureCredential` or the `AzureDeveloperCLICredential` can use the account logged in to the Azure Developer CLI to authenticate calls in their application when running locally.
+
+To authenticate with the Azure Developer CLI, run `azd auth login`. On a system with a default web browser, `azd` will launch the browser to authenticate. On systems without a default web browser, run `azd auth login --use-device-code` to use the device code authentication flow.
+
## Key concepts
### Credentials
@@ -44,9 +50,7 @@ service client to authenticate requests. Service clients across the Azure SDK
accept a credential instance when they are constructed, and use that credential
to authenticate requests.
-The `azidentity` module focuses on OAuth authentication with Azure Active
-Directory (AAD). It offers a variety of credential types capable of acquiring
-an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types.
+The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. It offers a variety of credential types capable of acquiring a Microsoft Entra access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types.
### DefaultAzureCredential
@@ -58,6 +62,7 @@ an Azure AD access token. See [Credential Types](#credential-types "Credential T
1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity.
1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it.
1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity.
+1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account.
> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types.
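+
+A minimal sketch of constructing the chain (the helper shown is illustrative; pass the credential to any client constructor that accepts an `azcore.TokenCredential`):
+
+```go
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+func newDefaultCredential() (azcore.TokenCredential, error) {
+	// tries each credential in the chain described above until one succeeds
+	cred, err := azidentity.NewDefaultAzureCredential(nil)
+	if err != nil {
+		return nil, err
+	}
+	return cred, nil
+}
+```
+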
@@ -152,6 +157,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|Credential|Usage
|-|-
|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI
+|[AzureDeveloperCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticate as the user signed in to the Azure Developer CLI
## Environment Variables
@@ -161,16 +167,16 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|variable name|value
|-|-
-|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application
-|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant
+|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
+|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
|`AZURE_CLIENT_SECRET`|one of the application's client secrets
#### Service principal with certificate
|variable name|value
|-|-
-|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application
-|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant
+|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
+|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key
|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any
@@ -178,22 +184,30 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|variable name|value
|-|-
-|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application
+|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
|`AZURE_USERNAME`|a username (usually an email address)
|`AZURE_PASSWORD`|that user's password
Configuration is attempted in the above order. For example, if values for a
client secret and certificate are both present, the client secret will be used.
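+
+Once the variables for one of the configurations above are set, the corresponding credential needs no further configuration (a minimal sketch; the helper name is illustrative):
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+
+func credentialFromEnvironment() (*azidentity.EnvironmentCredential, error) {
+	// reads AZURE_TENANT_ID, AZURE_CLIENT_ID, and the secret, certificate, or
+	// username/password variables described above, in that order of precedence
+	return azidentity.NewEnvironmentCredential(nil)
+}
+```
+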
+## Token caching
+
+Token caching is an `azidentity` feature that allows apps to:
+
+* Cache tokens in memory (default) or on disk (opt-in).
+* Improve resilience and performance.
+* Reduce the number of requests made to Microsoft Entra ID to obtain access tokens.
+
+For more details, see the [token caching documentation](https://aka.ms/azsdk/go/identity/caching).
+
## Troubleshooting
### Error Handling
Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot).
-For more details on handling specific Azure Active Directory errors please refer to the
-Azure Active Directory
-[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes).
+For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes).
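+
+For example, an authentication failure can be detected with a standard `errors.As` check (a minimal sketch; the helper name is illustrative):
+
+```go
+import (
+	"errors"
+	"log"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+func logAuthFailure(err error) {
+	var authErr *azidentity.AuthenticationFailedError
+	if errors.As(err, &authErr) {
+		// the error text typically includes the Microsoft Entra response and its AADSTS code
+		log.Printf("authentication failed: %v", authErr)
+	}
+}
+```
+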
### Logging
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
new file mode 100644
index 00000000000..c0d6601469c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
@@ -0,0 +1,70 @@
+## Token caching in the Azure Identity client module
+
+*Token caching* is a feature provided by the Azure Identity library that allows apps to:
+
+- Improve their resilience and performance.
+- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens.
+- Reduce the number of times the user is prompted to authenticate.
+
+When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token.
+
+Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested.
+
+### In-memory token caching
+
+*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe.
+
+**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library.
+
+#### Caching cannot be disabled
+
+As there are many levels of caching, it's not possible to disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance.
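+
+For example, an app that needs to discard cached tokens can construct a new credential instance (a minimal sketch using `ClientSecretCredential`; the helper and its arguments are placeholders):
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+
+func freshCredential(tenantID, clientID, secret string) (*azidentity.ClientSecretCredential, error) {
+	// each credential instance owns its own in-memory cache, so a new instance starts empty
+	return azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+}
+```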
+
+### Persistent token caching
+
+> Only azidentity v1.5.0-beta versions support persistent token caching
+
+*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems.
+
+| Operating system | Storage mechanism |
+|------------------|---------------------------------------|
+| Linux | kernel key retention service (keyctl) |
+| macOS | Keychain |
+| Windows | DPAPI |
+
+By default, the token cache protects any persisted data with the user data protection APIs available on the current platform.
+When no data protection is available, an application can opt in to unencrypted storage by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`, which lets a credential fall back to unencrypted storage when it can't encrypt the cache. We don't recommend this storage method because it offers significantly weaker protection: tokens aren't encrypted to the current user, so anyone with access to the machine could potentially read the cache.
+
+With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which:
+
+- Makes the app more resilient to failures.
+- Ensures the app can continue to function during an Entra ID outage or disruption.
+- Avoids having to prompt users to authenticate each time the process is restarted.
+
+>IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains.
+
+#### Example code
+
+See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data.
+
+### Credentials supporting token caching
+
+The following table indicates the state of in-memory and persistent caching in each credential type.
+
+**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache).
+
+| Credential | In-memory token caching | Persistent token caching |
+|--------------------------------|---------------------------------------------------------------------|--------------------------|
+| `AzureCLICredential` | Not Supported | Not Supported |
+| `AzureDeveloperCLICredential` | Not Supported | Not Supported |
+| `ClientAssertionCredential` | Supported | Supported |
+| `ClientCertificateCredential` | Supported | Supported |
+| `ClientSecretCredential` | Supported | Supported |
+| `DefaultAzureCredential` | Supported if the target credential in the default chain supports it | Not Supported |
+| `DeviceCodeCredential` | Supported | Supported |
+| `EnvironmentCredential` | Supported | Not Supported |
+| `InteractiveBrowserCredential` | Supported | Supported |
+| `ManagedIdentityCredential` | Supported | Not Supported |
+| `OnBehalfOfCredential` | Supported | Supported |
+| `UsernamePasswordCredential` | Supported | Supported |
+| `WorkloadIdentityCredential` | Supported | Supported |
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
index fef099813c8..832c599eb90 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -8,7 +8,8 @@ This troubleshooting guide covers failure investigation techniques, common error
- [Permission issues](#permission-issues)
- [Find relevant information in errors](#find-relevant-information-in-errors)
- [Enable and configure logging](#enable-and-configure-logging)
-- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
+- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
+- [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues)
- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues)
- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues)
- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues)
@@ -23,7 +24,7 @@ This troubleshooting guide covers failure investigation techniques, common error
## Handle azidentity errors
-Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable.
+Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Microsoft Entra ID. Depending on the application, these errors may or may not be recoverable.
### Permission issues
@@ -31,7 +32,7 @@ Service client errors with a status code of 401 or 403 often indicate that authe
## Find relevant information in errors
-Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message:
+Authentication errors can include responses from Microsoft Entra ID and often contain information helpful in diagnosis. Consider the following error message:
```
ClientSecretCredential authentication failed
@@ -57,9 +58,9 @@ This error contains several pieces of information:
- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`.
-- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
+- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
-- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures.
+- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures.
### Enable and configure logging
@@ -96,17 +97,17 @@ azlog.SetEvents(azidentity.EventAuthentication)
| Error Code | Issue | Mitigation |
|---|---|---|
-|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
## Troubleshoot ClientCertificateCredential authentication issues
| Error Code | Description | Mitigation |
|---|---|---|
-|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
## Troubleshoot UsernamePasswordCredential authentication issues
@@ -172,7 +173,7 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio
|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response).
-## Troubleshoot AzureCliCredential authentication issues
+## Troubleshoot AzureCLICredential authentication issues
| Error Message |Description| Mitigation |
|---|---|---|
@@ -195,6 +196,29 @@ az account get-access-token --output json --resource https://management.core.win
> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.
+
+## Troubleshoot AzureDeveloperCLICredential authentication issues
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|Azure Developer CLI not found on path|The Azure Developer CLI isn't installed or couldn't be found.|Ensure the Azure Developer CLI is properly installed. See the installation instructions at [Install or update the Azure Developer CLI](https://learn.microsoft.com/azure/developer/azure-developer-cli/install-azd).<br>Validate the installation location has been added to the `PATH` environment variable.|
+|Please run "azd auth login"|No account is logged into the Azure Developer CLI, or the login has expired.|Log in to the Azure Developer CLI using the `azd auth login` command.<br>Validate that the Azure Developer CLI can obtain tokens. For instructions, see [Verify the Azure Developer CLI can obtain tokens](#verify-the-azure-developer-cli-can-obtain-tokens).|
+
+#### Verify the Azure Developer CLI can obtain tokens
+
+You can manually verify that the Azure Developer CLI is properly authenticated and can obtain tokens. First, use the `config` command to verify the account that is currently logged in to the Azure Developer CLI.
+
+```sh
+azd config list
+```
+
+Once you've verified the Azure Developer CLI is using the correct account, you can validate that it's able to obtain tokens for that account.
+
+```sh
+azd auth token --output json --scope https://management.core.windows.net/.default
+```
+> Note that the output of this command will contain a valid access token and SHOULD NOT BE SHARED, to avoid compromising account security.
+
## Troubleshoot `WorkloadIdentityCredential` authentication issues
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
index 47e77f88e3f..173ce2b3cda 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/azidentity",
- "Tag": "go/azidentity_6225ab0470"
+ "Tag": "go/azidentity_db4a26f583"
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
new file mode 100644
index 00000000000..ada4d6501d2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
@@ -0,0 +1,95 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
+)
+
+var supportedAuthRecordVersions = []string{"1.0"}
+
+// authenticationRecord is non-secret account information about an authenticated user that user credentials such as
+// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication
+// data. Call these credentials' Authenticate method to get an authenticationRecord for a user.
+type authenticationRecord struct {
+ // Authority is the URL of the authority that issued the token.
+ Authority string `json:"authority"`
+
+ // ClientID is the ID of the application that authenticated the user.
+ ClientID string `json:"clientId"`
+
+ // HomeAccountID uniquely identifies the account.
+ HomeAccountID string `json:"homeAccountId"`
+
+ // TenantID identifies the tenant in which the user authenticated.
+ TenantID string `json:"tenantId"`
+
+ // Username is the user's preferred username.
+ Username string `json:"username"`
+
+ // Version of the AuthenticationRecord.
+ Version string `json:"version"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord
+func (a *authenticationRecord) UnmarshalJSON(b []byte) error {
+ // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we
+ // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally
+ // different type enables this by assigning all the fields without recursing into this method.
+ type r authenticationRecord
+ err := json.Unmarshal(b, (*r)(a))
+ if err != nil {
+ return err
+ }
+ if a.Version == "" {
+ return errors.New("AuthenticationRecord must have a version")
+ }
+ for _, v := range supportedAuthRecordVersions {
+ if a.Version == v {
+ return nil
+ }
+ }
+ return fmt.Errorf("unsupported AuthenticationRecord version %q. This module supports %v", a.Version, supportedAuthRecordVersions)
+}
+
+// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued.
+func (a *authenticationRecord) account() public.Account {
+ return public.Account{
+ Environment: a.Authority,
+ HomeAccountID: a.HomeAccountID,
+ PreferredUsername: a.Username,
+ }
+}
+
+func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) {
+ u, err := url.Parse(ar.IDToken.Issuer)
+ if err != nil {
+ return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer)
+ }
+ tenant := ar.IDToken.TenantID
+ if tenant == "" {
+ tenant = strings.Trim(u.Path, "/")
+ }
+ username := ar.IDToken.PreferredUsername
+ if username == "" {
+ username = ar.IDToken.UPN
+ }
+ return authenticationRecord{
+ Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host),
+ ClientID: ar.IDToken.Audience,
+ HomeAccountID: ar.Account.HomeAccountID,
+ TenantID: tenant,
+ Username: username,
+ Version: "1.0",
+ }, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
index 10b742ce1a1..c3bcfb56c0a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
@@ -15,12 +15,12 @@ import (
"net/http"
"net/url"
"os"
- "regexp"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)
@@ -41,6 +41,10 @@ const (
organizationsTenantID = "organizations"
developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46"
defaultSuffix = "/.default"
+
+ traceNamespace = "Microsoft.Entra"
+ traceOpGetToken = "GetToken"
+ traceOpAuthenticate = "Authenticate"
)
var (
@@ -49,6 +53,9 @@ var (
errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names")
)
+// tokenCachePersistenceOptions contains options for persistent token caching
+type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions
+
// setAuthorityHost initializes the authority host for credentials. Precedence is:
// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user
// 2. value of AZURE_AUTHORITY_HOST
@@ -109,29 +116,20 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants
return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified)
}
-// validTenantID return true is it receives a valid tenantID, returns false otherwise
-func validTenantID(tenantID string) bool {
- match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID)
- if err != nil {
- return false
- }
- return match
-}
-
-func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter {
- pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts)
- return pipelineAdapter{pl: pl}
-}
-
-type pipelineAdapter struct {
- pl runtime.Pipeline
+func alphanumeric(r rune) bool {
+ return ('0' <= r && r <= '9') || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
}
-func (p pipelineAdapter) CloseIdleConnections() {
- // do nothing
+// validTenantID reports whether tenantID consists only of characters that are
+// valid in a tenant ID: ASCII letters, digits, '.', and '-'.
+func validTenantID(tenantID string) bool {
+ for _, r := range tenantID {
+ if !(alphanumeric(r) || r == '.' || r == '-') {
+ return false
+ }
+ }
+ return true
}
-func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) {
+func doForClient(client *azcore.Client, r *http.Request) (*http.Response, error) {
req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String())
if err != nil {
return nil, err
@@ -153,7 +151,18 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) {
return nil, err
}
}
- resp, err := p.pl.Do(req)
+
+ // copy headers to the new request, ignoring any for which the new request has a value
+ h := req.Raw().Header
+ for key, vals := range r.Header {
+ if _, has := h[key]; !has {
+ for _, val := range vals {
+ h.Add(key, val)
+ }
+ }
+ }
+
+ resp, err := client.Pipeline().Do(req)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
index 55a0d654347..43577ab3c5f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
@@ -14,7 +14,6 @@ import (
"fmt"
"os"
"os/exec"
- "regexp"
"runtime"
"strings"
"sync"
@@ -25,13 +24,9 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
-const (
- credNameAzureCLI = "AzureCLICredential"
- timeoutCLIRequest = 10 * time.Second
-)
+const credNameAzureCLI = "AzureCLICredential"
-// used by tests to fake invoking the CLI
-type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error)
+type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error)
// AzureCLICredentialOptions contains optional parameters for AzureCLICredential.
type AzureCLICredentialOptions struct {
@@ -39,17 +34,25 @@ type AzureCLICredentialOptions struct {
// to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
// logged in account can access.
AdditionallyAllowedTenants []string
+
+ // subscription is the name or ID of a subscription. Set this to acquire tokens for an account other
+ // than the Azure CLI's current account.
+ subscription string
+
// TenantID identifies the tenant the credential should authenticate in.
// Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user.
TenantID string
- tokenProvider azureCLITokenProvider
+ // inDefaultChain is true when the credential is part of DefaultAzureCredential
+ inDefaultChain bool
+ // tokenProvider is used by tests to fake invoking az
+ tokenProvider azTokenProvider
}
// init returns an instance of AzureCLICredentialOptions initialized with default values.
func (o *AzureCLICredentialOptions) init() {
if o.tokenProvider == nil {
- o.tokenProvider = defaultTokenProvider
+ o.tokenProvider = defaultAzTokenProvider
}
}
@@ -65,6 +68,14 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent
if options != nil {
cp = *options
}
+ for _, r := range cp.subscription {
+ if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') {
+ return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.subscription)
+ }
+ }
+ if cp.TenantID != "" && !validTenantID(cp.TenantID) {
+ return nil, errInvalidTenantID
+ }
cp.init()
cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants)
return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil
@@ -73,50 +84,51 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent
// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI.
// This method is called automatically by Azure SDK clients.
func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ at := azcore.AccessToken{}
if len(opts.Scopes) != 1 {
- return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope")
+ return at, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope")
+ }
+ if !validScope(opts.Scopes[0]) {
+ return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureCLI, opts.Scopes[0])
}
tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureCLI, c.opts.AdditionallyAllowedTenants)
if err != nil {
- return azcore.AccessToken{}, err
+ return at, err
}
- // pass the CLI an AAD v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes
- opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)}
c.mu.Lock()
defer c.mu.Unlock()
- b, err := c.opts.tokenProvider(ctx, opts.Scopes[0], tenant)
- if err != nil {
- return azcore.AccessToken{}, err
+ b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.subscription)
+ if err == nil {
+ at, err = c.createAccessToken(b)
}
- at, err := c.createAccessToken(b)
if err != nil {
- return azcore.AccessToken{}, err
+ err = unavailableIfInChain(err, c.opts.inDefaultChain)
+ return at, err
}
msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", "))
log.Write(EventAuthentication, msg)
return at, nil
}
-var defaultTokenProvider azureCLITokenProvider = func(ctx context.Context, resource string, tenantID string) ([]byte, error) {
- match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource)
- if err != nil {
- return nil, err
- }
- if !match {
- return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource)
- }
-
+// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes
+// callers have verified that all string arguments are safe to pass to the CLI.
+var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) {
+ // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes
+ resource := strings.TrimSuffix(scopes[0], defaultSuffix)
// set a default timeout for this authentication iff the application hasn't done so already
var cancel context.CancelFunc
if _, hasDeadline := ctx.Deadline(); !hasDeadline {
- ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest)
+ ctx, cancel = context.WithTimeout(ctx, cliTimeout)
defer cancel()
}
-
commandLine := "az account get-access-token -o json --resource " + resource
if tenantID != "" {
commandLine += " --tenant " + tenantID
}
+ if subscription != "" {
+ // subscription needs quotes because it may contain spaces
+ commandLine += ` --subscription "` + subscription + `"`
+ }
var cliCmd *exec.Cmd
if runtime.GOOS == "windows" {
dir := os.Getenv("SYSTEMROOT")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
new file mode 100644
index 00000000000..cbe7c4c2db1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
@@ -0,0 +1,169 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential"
+
+type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error)
+
+// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential.
+type AzureDeveloperCLICredentialOptions struct {
+ // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition
+ // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
+ // logged in account can access.
+ AdditionallyAllowedTenants []string
+
+ // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment,
+ // which is the tenant of the selected Azure subscription.
+ TenantID string
+
+ // inDefaultChain is true when the credential is part of DefaultAzureCredential
+ inDefaultChain bool
+ // tokenProvider is used by tests to fake invoking azd
+ tokenProvider azdTokenProvider
+}
+
+// AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI].
+//
+// [Azure Developer CLI]: https://learn.microsoft.com/azure/developer/azure-developer-cli/overview
+type AzureDeveloperCLICredential struct {
+ mu *sync.Mutex
+ opts AzureDeveloperCLICredentialOptions
+}
+
+// NewAzureDeveloperCLICredential constructs an AzureDeveloperCLICredential. Pass nil to accept default options.
+func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions) (*AzureDeveloperCLICredential, error) {
+ cp := AzureDeveloperCLICredentialOptions{}
+ if options != nil {
+ cp = *options
+ }
+ if cp.TenantID != "" && !validTenantID(cp.TenantID) {
+ return nil, errInvalidTenantID
+ }
+ if cp.tokenProvider == nil {
+ cp.tokenProvider = defaultAzdTokenProvider
+ }
+ return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil
+}
+
+// GetToken requests a token from the Azure Developer CLI. This credential doesn't cache tokens, so every call invokes azd.
+// This method is called automatically by Azure SDK clients.
+func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ at := azcore.AccessToken{}
+ if len(opts.Scopes) == 0 {
+ return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope")
+ }
+ for _, scope := range opts.Scopes {
+ if !validScope(scope) {
+ return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope)
+ }
+ }
+ tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants)
+ if err != nil {
+ return at, err
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant)
+ if err == nil {
+ at, err = c.createAccessToken(b)
+ }
+ if err != nil {
+ err = unavailableIfInChain(err, c.opts.inDefaultChain)
+ return at, err
+ }
+ msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", "))
+ log.Write(EventAuthentication, msg)
+ return at, nil
+}
+
+// defaultAzdTokenProvider invokes the Azure Developer CLI to acquire a token. It assumes
+// callers have verified that all string arguments are safe to pass to the CLI.
+var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) {
+ // set a default timeout for this authentication iff the application hasn't done so already
+ var cancel context.CancelFunc
+ if _, hasDeadline := ctx.Deadline(); !hasDeadline {
+ ctx, cancel = context.WithTimeout(ctx, cliTimeout)
+ defer cancel()
+ }
+ commandLine := "azd auth token -o json"
+ if tenant != "" {
+ commandLine += " --tenant-id " + tenant
+ }
+ for _, scope := range scopes {
+ commandLine += " --scope " + scope
+ }
+ var cliCmd *exec.Cmd
+ if runtime.GOOS == "windows" {
+ dir := os.Getenv("SYSTEMROOT")
+ if dir == "" {
+ return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value")
+ }
+ cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine)
+ cliCmd.Dir = dir
+ } else {
+ cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine)
+ cliCmd.Dir = "/bin"
+ }
+ cliCmd.Env = os.Environ()
+ var stderr bytes.Buffer
+ cliCmd.Stderr = &stderr
+ output, err := cliCmd.Output()
+ if err != nil {
+ msg := stderr.String()
+ var exErr *exec.ExitError
+ if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'azd' is not recognized") {
+ msg = "Azure Developer CLI not found on path"
+ } else if strings.Contains(msg, "azd auth login") {
+ msg = `please run "azd auth login" from a command prompt to authenticate before using this credential`
+ }
+ if msg == "" {
+ msg = err.Error()
+ }
+ return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg)
+ }
+ return output, nil
+}
+
+func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
+ t := struct {
+ AccessToken string `json:"token"`
+ ExpiresOn string `json:"expiresOn"`
+ }{}
+ err := json.Unmarshal(tk, &t)
+ if err != nil {
+ return azcore.AccessToken{}, err
+ }
+ exp, err := time.Parse("2006-01-02T15:04:05Z", t.ExpiresOn)
+ if err != nil {
+ return azcore.AccessToken{}, fmt.Errorf("error parsing token expiration time %q: %v", t.ExpiresOn, err)
+ }
+ return azcore.AccessToken{
+ ExpiresOn: exp.UTC(),
+ Token: t.AccessToken,
+ }, nil
+}
+
+var _ azcore.TokenCredential = (*AzureDeveloperCLICredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
index 9002ea0b050..d077682c5c2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
@@ -25,6 +25,7 @@ stages:
- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
parameters:
RunLiveTests: true
+ UsePipelineProxy: false
ServiceDirectory: 'azidentity'
CloudConfig:
Public:
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
index 303d5fc0925..fc3df68eb19 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
@@ -12,6 +12,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)
@@ -20,9 +21,9 @@ const credNameAssertion = "ClientAssertionCredential"
// ClientAssertionCredential authenticates an application with assertions provided by a callback function.
// This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for
// the most common assertion scenario, authenticating a service principal with a certificate. See
-// [Azure AD documentation] for details of the assertion format.
+// [Microsoft Entra ID documentation] for details of the assertion format.
//
-// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format
+// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format
type ClientAssertionCredential struct {
client *confidentialClient
}
@@ -35,11 +36,15 @@ type ClientAssertionCredentialOptions struct {
// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
// application is registered.
AdditionallyAllowedTenants []string
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
+
+ // tokenCachePersistenceOptions enables persistent token caching when not nil.
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults.
@@ -56,9 +61,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c
},
)
msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
}
c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts)
if err != nil {
@@ -67,9 +73,13 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c
return &ClientAssertionCredential{client: c}, nil
}
-// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return c.client.GetToken(ctx, opts)
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameAssertion+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := c.client.GetToken(ctx, opts)
+ return tk, err
}
var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
index d3300e3053b..607533f486e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
@@ -15,6 +15,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
"golang.org/x/crypto/pkcs12"
)
@@ -29,15 +30,20 @@ type ClientCertificateCredentialOptions struct {
// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
// application is registered.
AdditionallyAllowedTenants []string
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
+
// SendCertificateChain controls whether the credential sends the public certificate chain in the x5c
// header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication.
// Defaults to False.
SendCertificateChain bool
+
+ // tokenCachePersistenceOptions enables persistent token caching when not nil.
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// ClientCertificateCredential authenticates a service principal with a certificate.
@@ -58,10 +64,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x
return nil, err
}
msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- SendX5C: options.SendCertificateChain,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ SendX5C: options.SendCertificateChain,
+ tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
}
c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts)
if err != nil {
@@ -70,9 +77,13 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x
return &ClientCertificateCredential{client: c}, nil
}
-// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return c.client.GetToken(ctx, opts)
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameCert+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := c.client.GetToken(ctx, opts)
+ return tk, err
}
// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
index d2ff7582b99..9e6772e9b80 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
@@ -11,6 +11,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)
@@ -24,11 +25,15 @@ type ClientSecretCredentialOptions struct {
// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
// application is registered.
AdditionallyAllowedTenants []string
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
+
+ // tokenCachePersistenceOptions enables persistent token caching when not nil.
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// ClientSecretCredential authenticates an application with a client secret.
@@ -46,20 +51,25 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st
return nil, err
}
msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
}
c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts)
if err != nil {
return nil, err
}
- return &ClientSecretCredential{c}, nil
+ return &ClientSecretCredential{client: c}, nil
}
-// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return c.client.GetToken(ctx, opts)
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameSecret+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := c.client.GetToken(ctx, opts)
+ return tk, err
}
var _ azcore.TokenCredential = (*ClientSecretCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
index 4853a9a0095..854267bdbfd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
@@ -10,6 +10,7 @@ import (
"context"
"errors"
"fmt"
+ "net/http"
"os"
"strings"
"sync"
@@ -17,6 +18,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)
@@ -28,6 +30,7 @@ type confidentialClientOptions struct {
// Assertion for on-behalf-of authentication
Assertion string
DisableInstanceDiscovery, SendX5C bool
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// confidentialClient wraps the MSAL confidential client
@@ -40,6 +43,7 @@ type confidentialClient struct {
name string
opts confidentialClientOptions
region string
+ azClient *azcore.Client
}
func newConfidentialClient(tenantID, clientID, name string, cred confidential.Credential, opts confidentialClientOptions) (*confidentialClient, error) {
@@ -50,6 +54,14 @@ func newConfidentialClient(tenantID, clientID, name string, cred confidential.Cr
if err != nil {
return nil, err
}
+ client, err := azcore.NewClient(module, version, runtime.PipelineOptions{
+ Tracing: runtime.TracingOptions{
+ Namespace: traceNamespace,
+ },
+ }, &opts.ClientOptions)
+ if err != nil {
+ return nil, err
+ }
opts.AdditionallyAllowedTenants = resolveAdditionalTenants(opts.AdditionallyAllowedTenants)
return &confidentialClient{
caeMu: &sync.Mutex{},
@@ -62,6 +74,7 @@ func newConfidentialClient(tenantID, clientID, name string, cred confidential.Cr
opts: opts,
region: os.Getenv(azureRegionalAuthorityName),
tenantID: tenantID,
+ azClient: client,
}, nil
}
@@ -132,10 +145,15 @@ func (c *confidentialClient) client(ctx context.Context, tro policy.TokenRequest
}
func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) {
+ cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE)
+ if err != nil {
+ return nil, err
+ }
authority := runtime.JoinPaths(c.host, c.tenantID)
o := []confidential.Option{
confidential.WithAzureRegion(c.region),
- confidential.WithHTTPClient(newPipelineAdapter(&c.opts.ClientOptions)),
+ confidential.WithCache(cache),
+ confidential.WithHTTPClient(c),
}
if enableCAE {
o = append(o, confidential.WithClientCapabilities(cp1))
@@ -149,8 +167,18 @@ func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClie
return confidential.New(authority, c.clientID, c.cred, o...)
}
-// resolveTenant returns the correct tenant for a token request given the client's
+// resolveTenant returns the correct WithTenantID() argument for a token request given the client's
// configuration, or an error when that configuration doesn't allow the specified tenant
func (c *confidentialClient) resolveTenant(specified string) (string, error) {
return resolveTenant(c.tenantID, specified, c.name, c.opts.AdditionallyAllowedTenants)
}
+
+// these methods satisfy the MSAL ops.HTTPClient interface
+
+func (c *confidentialClient) CloseIdleConnections() {
+ // do nothing
+}
+
+func (c *confidentialClient) Do(r *http.Request) (*http.Response, error) {
+ return doForClient(c.azClient, r)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
index 7647c60b1cb..35aeef86747 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
@@ -30,7 +30,7 @@ type DefaultAzureCredentialOptions struct {
// set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS.
AdditionallyAllowedTenants []string
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
@@ -49,6 +49,7 @@ type DefaultAzureCredentialOptions struct {
// more control over its configuration.
// - [ManagedIdentityCredential]
// - [AzureCLICredential]
+// - [AzureDeveloperCLICredential]
//
// Consult the documentation for these credential types for more information on how they authenticate.
// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for
@@ -117,6 +118,17 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default
creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err})
}
+ azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{
+ AdditionallyAllowedTenants: additionalTenants,
+ TenantID: options.TenantID,
+ })
+ if err == nil {
+ creds = append(creds, azdCred)
+ } else {
+ errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err})
+ }
+
if len(errorMessages) > 0 {
log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", strings.Join(errorMessages, "\n\t"))
}
@@ -129,7 +141,7 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default
return &DefaultAzureCredential{chain: chain}, nil
}
-// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
return c.chain.GetToken(ctx, opts)
}
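
For callers nothing changes: the Azure Developer CLI credential is appended to the chain after AzureCLICredential and is only consulted when the earlier credentials are unavailable. A usage sketch; the management scope is an example value, not mandated by the change:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    )

    func main() {
    	// the chain now also tries AzureDeveloperCLICredential after AzureCLICredential
    	cred, err := azidentity.NewDefaultAzureCredential(nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
    		Scopes: []string{"https://management.azure.com/.default"}, // example scope
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("token expires:", tk.ExpiresOn)
    }
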
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
new file mode 100644
index 00000000000..d8b952f532e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
@@ -0,0 +1,38 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "errors"
+ "time"
+)
+
+// cliTimeout is the default timeout for authentication attempts via CLI tools
+const cliTimeout = 10 * time.Second
+
+// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a
+// credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try
+// the next credential in its chain (another developer credential).
+func unavailableIfInChain(err error, inDefaultChain bool) error {
+ if err != nil && inDefaultChain {
+ var unavailableErr *credentialUnavailableError
+ if !errors.As(err, &unavailableErr) {
+ err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error())
+ }
+ }
+ return err
+}
+
+// validScope is for credentials authenticating via external tools. The authority validates scopes for all other credentials.
+func validScope(scope string) bool {
+ for _, r := range scope {
+ if !(alphanumeric(r) || r == '.' || r == '-' || r == '_' || r == '/' || r == ':') {
+ return false
+ }
+ }
+ return true
+}
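
validScope filters the scopes handed to external developer tools (the CLI-based credentials), since for those the authority never gets a chance to reject a malformed scope. A self-contained illustration of the same character whitelist; the helpers are reimplemented here for demonstration because the real ones are unexported:

    package main

    import "fmt"

    // alphanumeric and validScope mirror developer_credential_util.go: only ASCII
    // letters, digits, '.', '-', '_', '/' and ':' are accepted.
    func alphanumeric(r rune) bool {
    	return ('0' <= r && r <= '9') || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
    }

    func validScope(scope string) bool {
    	for _, r := range scope {
    		if !(alphanumeric(r) || r == '.' || r == '-' || r == '_' || r == '/' || r == ':') {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	fmt.Println(validScope("https://vault.azure.net/.default")) // true
    	fmt.Println(validScope("scope; rm -rf /"))                  // false: spaces and ';' are rejected
    }
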
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
index d245c269a76..1b7a283703a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
@@ -12,6 +12,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
const credNameDeviceCode = "DeviceCodeCredential"
@@ -23,19 +24,34 @@ type DeviceCodeCredentialOptions struct {
// AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
// tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
AdditionallyAllowedTenants []string
+
+ // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
+ // to enable the credential to use data from a previous authentication.
+ authenticationRecord authenticationRecord
+
// ClientID is the ID of the application users will authenticate to.
// Defaults to the ID of an Azure development application.
ClientID string
+
+ // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
+ // When this option is true, [DeviceCodeCredential.GetToken] will return [ErrAuthenticationRequired] when user
+ // interaction is necessary to acquire a token.
+ disableAutomaticAuthentication bool
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
- // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the
+
+ // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the
// "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant
// applications.
TenantID string
+ // tokenCachePersistenceOptions enables persistent token caching when not nil.
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions
+
// UserPrompt controls how the credential presents authentication instructions. The credential calls
// this function with authentication details when it receives a device code. By default, the credential
// prints these details to stdout.
@@ -63,14 +79,14 @@ type DeviceCodeMessage struct {
UserCode string `json:"user_code"`
// VerificationURL is the URL at which the user must authenticate.
VerificationURL string `json:"verification_uri"`
- // Message is user instruction from Azure Active Directory.
+ // Message is user instruction from Microsoft Entra ID.
Message string `json:"message"`
}
// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the
-// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful
+// user browse to a Microsoft Entra URL, enter a code, and authenticate. It's useful
// for authenticating a user in an environment without a web browser, such as an SSH session.
-// If a web browser is available, InteractiveBrowserCredential is more convenient because it
+// If a web browser is available, [InteractiveBrowserCredential] is more convenient because it
// automatically opens a browser to the login page.
type DeviceCodeCredential struct {
client *publicClient
@@ -84,10 +100,13 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC
}
cp.init()
msalOpts := publicClientOptions{
- AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
- ClientOptions: cp.ClientOptions,
- DeviceCodePrompt: cp.UserPrompt,
- DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
+ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
+ ClientOptions: cp.ClientOptions,
+ DeviceCodePrompt: cp.UserPrompt,
+ DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
+ DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
+ Record: cp.authenticationRecord,
+ TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
}
c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts)
if err != nil {
@@ -97,10 +116,23 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC
return &DeviceCodeCredential{client: c}, nil
}
-// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication.
+// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
+func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := c.client.Authenticate(ctx, opts)
+ return tk, err
+}
+
+// GetToken requests an access token from Microsoft Entra ID. It will begin the device code flow and poll until the user completes authentication.
// This method is called automatically by Azure SDK clients.
func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return c.client.GetToken(ctx, opts)
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := c.client.GetToken(ctx, opts)
+ return tk, err
}
var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil)
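
The device code credential's public surface is unchanged; the new record, cache and span plumbing is wired through unexported options. A usage sketch with a custom UserPrompt; the tenant and scope values are placeholders:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    )

    func main() {
    	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
    		TenantID: "organizations", // placeholder tenant
    		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
    			// the credential calls this instead of printing instructions to stdout
    			fmt.Printf("go to %s and enter code %s\n", dc.VerificationURL, dc.UserCode)
    			return nil
    		},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// GetToken begins the device code flow and polls until the user finishes authenticating
    	_, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{
    		Scopes: []string{"https://graph.microsoft.com/.default"}, // example scope
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
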
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
index 7ecd928e024..42f84875e23 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
@@ -25,7 +25,7 @@ type EnvironmentCredentialOptions struct {
azcore.ClientOptions
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
@@ -156,7 +156,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme
return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set")
}
-// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
return c.cred.GetToken(ctx, opts)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
index e1a21e0030a..335d2b7dcf2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
@@ -18,6 +18,10 @@ import (
msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
)
+// errAuthenticationRequired indicates a credential's Authenticate method must be called to acquire a token
+// because user interaction is required and the credential is configured not to automatically prompt the user.
+var errAuthenticationRequired error = &credentialUnavailableError{"can't acquire a token without user interaction. Call Authenticate to interactively authenticate a user"}
+
// getResponseFromError retrieves the response carried by
// an AuthenticationFailedError or MSAL CallErr, if any
func getResponseFromError(err error) *http.Response {
@@ -53,7 +57,13 @@ func (e *AuthenticationFailedError) Error() string {
}
msg := &bytes.Buffer{}
fmt.Fprintf(msg, e.credType+" authentication failed\n")
- fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path)
+ if e.RawResponse.Request != nil {
+ fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path)
+ } else {
+ // this happens when the response is created from a custom HTTP transporter,
+ // which doesn't guarantee to bind the original request to the response
+ fmt.Fprintln(msg, "Request information not available")
+ }
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status)
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
@@ -74,6 +84,8 @@ func (e *AuthenticationFailedError) Error() string {
switch e.credType {
case credNameAzureCLI:
anchor = "azure-cli"
+ case credNameAzureDeveloperCLI:
+ anchor = "azd"
case credNameCert:
anchor = "client-cert"
case credNameSecret:
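
A response built by a custom transport may not carry its originating request, so the error formatting now degrades gracefully; callers inspecting AuthenticationFailedError should likewise avoid assuming RawResponse.Request is set. A hedged error-handling sketch; the credential choice and scope are illustrative:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    )

    // getToken inspects an AuthenticationFailedError without assuming the response
    // still carries the original request (it may not, per the change above).
    func getToken(ctx context.Context, cred azcore.TokenCredential) (azcore.AccessToken, error) {
    	tk, err := cred.GetToken(ctx, policy.TokenRequestOptions{
    		Scopes: []string{"https://vault.azure.net/.default"}, // example scope
    	})
    	if err != nil {
    		var authErr *azidentity.AuthenticationFailedError
    		if errors.As(err, &authErr) && authErr.RawResponse != nil {
    			fmt.Println("authentication failed with status", authErr.RawResponse.Status)
    		}
    	}
    	return tk, err
    }

    func main() {
    	cred, err := azidentity.NewDefaultAzureCredential(nil)
    	if err != nil {
    		panic(err)
    	}
    	_, _ = getToken(context.Background(), cred)
    }
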
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
new file mode 100644
index 00000000000..04ea962b422
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
@@ -0,0 +1,6 @@
+go 1.18
+
+use (
+ .
+ ./cache
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
new file mode 100644
index 00000000000..7cd86b0019e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
@@ -0,0 +1,39 @@
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
index 08f3efbf3ec..bd829698375 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
@@ -11,6 +11,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
const credNameBrowser = "InteractiveBrowserCredential"
@@ -22,26 +23,40 @@ type InteractiveBrowserCredentialOptions struct {
// AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
// tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
AdditionallyAllowedTenants []string
+
+ // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
+ // to enable the credential to use data from a previous authentication.
+ authenticationRecord authenticationRecord
+
// ClientID is the ID of the application users will authenticate to.
// Defaults to the ID of an Azure development application.
ClientID string
+ // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
+ // When this option is true, [InteractiveBrowserCredential.GetToken] will return [ErrAuthenticationRequired] when
+ // user interaction is necessary to acquire a token.
+ disableAutomaticAuthentication bool
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
// LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account.
LoginHint string
- // RedirectURL is the URL Azure Active Directory will redirect to with the access token. This is required
+
+ // RedirectURL is the URL Microsoft Entra ID will redirect to with the access token. This is required
// only when setting ClientID, and must match a redirect URI in the application's registration.
// Applications which have registered "http://localhost" as a redirect URI need not set this option.
RedirectURL string
- // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the
+ // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the
// "organizations" tenant, which can authenticate work and school accounts.
TenantID string
+
+ // tokenCachePersistenceOptions enables persistent token caching when not nil.
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
func (o *InteractiveBrowserCredentialOptions) init() {
@@ -66,10 +81,14 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption
}
cp.init()
msalOpts := publicClientOptions{
- ClientOptions: cp.ClientOptions,
- DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
- LoginHint: cp.LoginHint,
- RedirectURL: cp.RedirectURL,
+ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
+ ClientOptions: cp.ClientOptions,
+ DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
+ DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
+ LoginHint: cp.LoginHint,
+ Record: cp.authenticationRecord,
+ RedirectURL: cp.RedirectURL,
+ TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
}
c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts)
if err != nil {
@@ -78,9 +97,22 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption
return &InteractiveBrowserCredential{client: c}, nil
}
-// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
+func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := c.client.Authenticate(ctx, opts)
+ return tk, err
+}
+
+// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return c.client.GetToken(ctx, opts)
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := c.client.GetToken(ctx, opts)
+ return tk, err
}
var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil)
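
Besides the span and record plumbing, the browser credential now forwards AdditionallyAllowedTenants to the public client, which the previous mapping omitted. A usage sketch; the hint, tenant list and scope are placeholders:

    package main

    import (
    	"context"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    )

    func main() {
    	cred, err := azidentity.NewInteractiveBrowserCredential(&azidentity.InteractiveBrowserCredentialOptions{
    		LoginHint:                  "user@contoso.com", // placeholder: pre-fills the account prompt
    		AdditionallyAllowedTenants: []string{"*"},      // now forwarded to the public client
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// opens the system browser, then caches the resulting account for later calls
    	_, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{
    		Scopes: []string{"https://management.azure.com/.default"}, // example scope
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
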
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go
new file mode 100644
index 00000000000..b1b4d5c8bd3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go
@@ -0,0 +1,18 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package internal
+
+// TokenCachePersistenceOptions contains options for persistent token caching
+type TokenCachePersistenceOptions struct {
+ // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text
+ // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts
+ // encryption before falling back to plaintext storage.
+ AllowUnencryptedStorage bool
+
+ // Name identifies the cache. Set this to isolate data from other applications.
+ Name string
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go
new file mode 100644
index 00000000000..c1498b46447
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go
@@ -0,0 +1,31 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package internal
+
+import (
+ "errors"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+)
+
+var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching")
+
+// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to
+// use a persistent cache must first import the cache module, which will replace this function
+// with a platform-specific implementation.
+var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) {
+ if o == nil {
+ return nil, nil
+ }
+ return nil, errMissingImport
+}
+
+// CacheFilePath returns the path to the cache file for the given name.
+// Defining it in this package makes it available to azidentity tests.
+var CacheFilePath = func(name string) (string, error) {
+ return "", errMissingImport
+}
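
internal.NewCache is deliberately a package-level function variable: importing the separate cache module replaces it at init time with a platform-specific implementation, while applications that never ask for persistence pay nothing. A generic sketch of that override pattern, using hypothetical package and import names:

    // Package featurestub illustrates the variable-function override used by internal.NewCache.
    package featurestub

    import "errors"

    // New is a stub until an optional companion package replaces it at init time.
    var New = func(name string) (string, error) {
    	return "", errors.New("import example.com/featurestub/impl to enable this feature")
    }

    // A hypothetical companion package rewires the stub in its init function:
    //
    //	package impl
    //
    //	import "example.com/featurestub"
    //
    //	func init() {
    //		featurestub.New = func(name string) (string, error) {
    //			return "real implementation for " + name, nil
    //		}
    //	}
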
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
index fdc3c1f6776..7c25cb8bdd5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
@@ -28,12 +28,14 @@ import (
const (
arcIMDSEndpoint = "IMDS_ENDPOINT"
+ defaultIdentityClientID = "DEFAULT_IDENTITY_CLIENT_ID"
identityEndpoint = "IDENTITY_ENDPOINT"
identityHeader = "IDENTITY_HEADER"
identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT"
headerMetadata = "Metadata"
imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
msiEndpoint = "MSI_ENDPOINT"
+ msiSecret = "MSI_SECRET"
imdsAPIVersion = "2018-02-01"
azureArcAPIVersion = "2019-08-15"
serviceFabricAPIVersion = "2019-07-01-preview"
@@ -47,6 +49,7 @@ type msiType int
const (
msiTypeAppService msiType = iota
msiTypeAzureArc
+ msiTypeAzureML
msiTypeCloudShell
msiTypeIMDS
msiTypeServiceFabric
@@ -55,7 +58,7 @@ const (
// managedIdentityClient provides the base for authenticating in managed identity environments
// This type includes an runtime.Pipeline and TokenCredentialOptions.
type managedIdentityClient struct {
- pipeline runtime.Pipeline
+ azClient *azcore.Client
msiType msiType
endpoint string
id ManagedIDKind
@@ -135,13 +138,27 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
c.msiType = msiTypeAzureArc
}
} else if endpoint, ok := os.LookupEnv(msiEndpoint); ok {
- env = "Cloud Shell"
c.endpoint = endpoint
- c.msiType = msiTypeCloudShell
+ if _, ok := os.LookupEnv(msiSecret); ok {
+ env = "Azure ML"
+ c.msiType = msiTypeAzureML
+ } else {
+ env = "Cloud Shell"
+ c.msiType = msiTypeCloudShell
+ }
} else {
setIMDSRetryOptionDefaults(&cp.Retry)
}
- c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp)
+
+ client, err := azcore.NewClient(module, version, runtime.PipelineOptions{
+ Tracing: runtime.TracingOptions{
+ Namespace: traceNamespace,
+ },
+ }, &cp)
+ if err != nil {
+ return nil, err
+ }
+ c.azClient = client
if log.Should(EventAuthentication) {
log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env)
@@ -168,7 +185,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
return azcore.AccessToken{}, err
}
- resp, err := c.pipeline.Do(msg)
+ resp, err := c.azClient.Pipeline().Do(msg)
if err != nil {
return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err)
}
@@ -247,6 +264,8 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage
return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err)
}
return c.createAzureArcAuthRequest(ctx, id, scopes, key)
+ case msiTypeAzureML:
+ return c.createAzureMLAuthRequest(ctx, id, scopes)
case msiTypeServiceFabric:
return c.createServiceFabricAuthRequest(ctx, id, scopes)
case msiTypeCloudShell:
@@ -296,6 +315,29 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context,
return request, nil
}
+func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+ request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ request.Raw().Header.Set("secret", os.Getenv(msiSecret))
+ q := request.Raw().URL.Query()
+ q.Add("api-version", "2017-09-01")
+ q.Add("resource", strings.Join(scopes, " "))
+ q.Add("clientid", os.Getenv(defaultIdentityClientID))
+ if id != nil {
+ if id.idKind() == miResourceID {
+ log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID")
+ q.Set("clientid", "")
+ q.Set(qpResID, id.String())
+ } else {
+ q.Set("clientid", id.String())
+ }
+ }
+ request.Raw().URL.RawQuery = q.Encode()
+ return request, nil
+}
+
func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
if err != nil {
@@ -330,7 +372,7 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour
q.Add("resource", strings.Join(resources, " "))
request.Raw().URL.RawQuery = q.Encode()
// send the initial request to get the short-lived secret key
- response, err := c.pipeline.Do(request)
+ response, err := c.azClient.Pipeline().Do(request)
if err != nil {
return "", err
}
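
The new Azure ML branch keys off MSI_SECRET being present alongside MSI_ENDPOINT; Cloud Shell remains the interpretation when only the endpoint variable is set. A standalone sketch of that detection order; the helper is illustrative and covers only the MSI_ENDPOINT branch shown above, not the full decision tree:

    package main

    import (
    	"fmt"
    	"os"
    )

    // detectMSIEnvironment mirrors the branch added in managed_identity_client.go:
    // MSI_ENDPOINT + MSI_SECRET means Azure ML, MSI_ENDPOINT alone means Cloud Shell.
    func detectMSIEnvironment() string {
    	if _, ok := os.LookupEnv("MSI_ENDPOINT"); !ok {
    		return "IMDS (default)"
    	}
    	if _, ok := os.LookupEnv("MSI_SECRET"); ok {
    		return "Azure ML"
    	}
    	return "Cloud Shell"
    }

    func main() {
    	fmt.Println("managed identity environment:", detectMSIEnvironment())
    }
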
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
index 35c5e6725cd..dcd278befa1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
@@ -13,6 +13,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)
@@ -67,8 +68,8 @@ type ManagedIdentityCredentialOptions struct {
// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities.
// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a
-// user-assigned identity. See Azure Active Directory documentation for more information about managed identities:
-// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview
+// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities:
+// https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview
type ManagedIdentityCredential struct {
client *confidentialClient
mic *managedIdentityClient
@@ -92,7 +93,9 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M
clientID = options.ID.String()
}
// similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value
- c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{})
+ c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{
+ ClientOptions: options.ClientOptions,
+ })
if err != nil {
return nil, err
}
@@ -101,13 +104,18 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M
// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients.
func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+
if len(opts.Scopes) != 1 {
- err := fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity)
+ err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity)
return azcore.AccessToken{}, err
}
- // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
+ // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)}
- return c.client.GetToken(ctx, opts)
+ tk, err := c.client.GetToken(ctx, opts)
+ return tk, err
}
var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil)
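
GetToken still requires exactly one scope and strips a trailing "/.default" before calling the managed identity endpoint; what changes is that the request now runs inside a tracing span and the inner confidential client finally receives the caller's ClientOptions. A usage sketch; the client ID is a placeholder:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    )

    func main() {
    	// user-assigned identity selected by client ID (placeholder value)
    	cred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
    		ID: azidentity.ClientID("00000000-0000-0000-0000-000000000000"),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// exactly one scope; "/.default" is trimmed to the v1 resource before the request
    	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
    		Scopes: []string{"https://storage.azure.com/.default"},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("token acquired, expires:", tk.ExpiresOn)
    }
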
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go
index 2b360b681df..5e67cf02145 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go
@@ -13,6 +13,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)
@@ -21,9 +22,9 @@ const credNameOBO = "OnBehalfOfCredential"
// OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by
// middle-tier services that authorize requests to other services with a delegated user identity. Because this
// is not an interactive authentication flow, an application using it must have admin consent for any delegated
-// permissions before requesting tokens for them. See [Azure Active Directory documentation] for more details.
+// permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details.
//
-// [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow
+// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow
type OnBehalfOfCredential struct {
client *confidentialClient
}
@@ -36,11 +37,13 @@ type OnBehalfOfCredentialOptions struct {
// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
// application is registered.
AdditionallyAllowedTenants []string
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
+
// SendCertificateChain applies only when the credential is configured to authenticate with a certificate.
// This setting controls whether the credential sends the public certificate chain in the x5c header of each
// token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication.
@@ -84,9 +87,13 @@ func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred conf
return &OnBehalfOfCredential{c}, nil
}
-// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return o.client.GetToken(ctx, opts)
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameOBO+"."+traceOpGetToken, o.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := o.client.GetToken(ctx, opts)
+ return tk, err
}
var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil)
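
On-behalf-of acquisition itself is unchanged; only the tracing span around GetToken is new. A sketch of the flow in a middle-tier service; every identifier and the incoming assertion below is a placeholder:

    package main

    import (
    	"context"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    )

    func main() {
    	userAssertion := "<token the middle tier received from its caller>" // placeholder
    	cred, err := azidentity.NewOnBehalfOfCredentialWithSecret(
    		"tenant-id", "client-id", userAssertion, "client-secret", nil, // placeholders
    	)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// exchanges the user assertion for a token addressed to the downstream service
    	_, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{
    		Scopes: []string{"https://graph.microsoft.com/.default"}, // example downstream scope
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
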
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
index 6512d3e25fd..63c31190d18 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
@@ -8,38 +8,52 @@ package azidentity
import (
"context"
+ "errors"
"fmt"
+ "net/http"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
+
+ // this import ensures well-known configurations in azcore/cloud have ARM audiences for Authenticate()
+ _ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
)
type publicClientOptions struct {
azcore.ClientOptions
- AdditionallyAllowedTenants []string
- DeviceCodePrompt func(context.Context, DeviceCodeMessage) error
- DisableInstanceDiscovery bool
- LoginHint, RedirectURL string
- Username, Password string
+ AdditionallyAllowedTenants []string
+ DeviceCodePrompt func(context.Context, DeviceCodeMessage) error
+ DisableAutomaticAuthentication bool
+ DisableInstanceDiscovery bool
+ LoginHint, RedirectURL string
+ Record authenticationRecord
+ TokenCachePersistenceOptions *tokenCachePersistenceOptions
+ Username, Password string
}
// publicClient wraps the MSAL public client
type publicClient struct {
- account public.Account
cae, noCAE msalPublicClient
caeMu, noCAEMu, clientMu *sync.Mutex
clientID, tenantID string
+ defaultScope []string
host string
name string
opts publicClientOptions
+ record authenticationRecord
+ azClient *azcore.Client
}
+var errScopeRequired = errors.New("authenticating in this environment requires specifying a scope in TokenRequestOptions")
+
func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*publicClient, error) {
if !validTenantID(tenantID) {
return nil, errInvalidTenantID
@@ -48,19 +62,76 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p
if err != nil {
return nil, err
}
+ // if the application specified a cloud configuration, use its ARM audience as the default scope for Authenticate()
+ audience := o.Cloud.Services[cloud.ResourceManager].Audience
+ if audience == "" {
+ // no cloud configuration, or no ARM audience, specified; try to map the host to a well-known one (all of which have a trailing slash)
+ if !strings.HasSuffix(host, "/") {
+ host += "/"
+ }
+ switch host {
+ case cloud.AzureChina.ActiveDirectoryAuthorityHost:
+ audience = cloud.AzureChina.Services[cloud.ResourceManager].Audience
+ case cloud.AzureGovernment.ActiveDirectoryAuthorityHost:
+ audience = cloud.AzureGovernment.Services[cloud.ResourceManager].Audience
+ case cloud.AzurePublic.ActiveDirectoryAuthorityHost:
+ audience = cloud.AzurePublic.Services[cloud.ResourceManager].Audience
+ }
+ }
+ // if we didn't come up with an audience, the application will have to specify a scope for Authenticate()
+ var defaultScope []string
+ if audience != "" {
+ defaultScope = []string{audience + defaultSuffix}
+ }
+ client, err := azcore.NewClient(module, version, runtime.PipelineOptions{
+ Tracing: runtime.TracingOptions{
+ Namespace: traceNamespace,
+ },
+ }, &o.ClientOptions)
+ if err != nil {
+ return nil, err
+ }
o.AdditionallyAllowedTenants = resolveAdditionalTenants(o.AdditionallyAllowedTenants)
return &publicClient{
- caeMu: &sync.Mutex{},
- clientID: clientID,
- clientMu: &sync.Mutex{},
- host: host,
- name: name,
- noCAEMu: &sync.Mutex{},
- opts: o,
- tenantID: tenantID,
+ caeMu: &sync.Mutex{},
+ clientID: clientID,
+ clientMu: &sync.Mutex{},
+ defaultScope: defaultScope,
+ host: host,
+ name: name,
+ noCAEMu: &sync.Mutex{},
+ opts: o,
+ record: o.Record,
+ tenantID: tenantID,
+ azClient: client,
}, nil
}
+func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) {
+ if tro == nil {
+ tro = &policy.TokenRequestOptions{}
+ }
+ if len(tro.Scopes) == 0 {
+ if p.defaultScope == nil {
+ return authenticationRecord{}, errScopeRequired
+ }
+ tro.Scopes = p.defaultScope
+ }
+ client, mu, err := p.client(*tro)
+ if err != nil {
+ return authenticationRecord{}, err
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ _, err = p.reqToken(ctx, client, *tro)
+ if err == nil {
+ scope := strings.Join(tro.Scopes, ", ")
+ msg := fmt.Sprintf("%s.Authenticate() acquired a token for scope %q", p.name, scope)
+ log.Write(EventAuthentication, msg)
+ }
+ return p.record, err
+}
+
// GetToken requests an access token from MSAL, checking the cache first.
func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) {
if len(tro.Scopes) < 1 {
@@ -76,10 +147,13 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti
}
mu.Lock()
defer mu.Unlock()
- ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.account), public.WithClaims(tro.Claims), public.WithTenantID(tenant))
+ ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.record.account()), public.WithClaims(tro.Claims), public.WithTenantID(tenant))
if err == nil {
return p.token(ar, err)
}
+ if p.opts.DisableAutomaticAuthentication {
+ return azcore.AccessToken{}, errAuthenticationRequired
+ }
at, err := p.reqToken(ctx, client, tro)
if err == nil {
msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", "))
@@ -148,9 +222,14 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient,
}
func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {
+ cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE)
+ if err != nil {
+ return nil, err
+ }
o := []public.Option{
public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)),
- public.WithHTTPClient(newPipelineAdapter(&p.opts.ClientOptions)),
+ public.WithCache(cache),
+ public.WithHTTPClient(p),
}
if enableCAE {
o = append(o, public.WithClientCapabilities(cp1))
@@ -163,7 +242,7 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {
func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) {
if err == nil {
- p.account = ar.Account
+ p.record, err = newAuthenticationRecord(ar)
} else {
res := getResponseFromError(err)
err = newAuthenticationFailedError(p.name, err.Error(), res, err)
@@ -171,8 +250,24 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke
return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
}
-// resolveTenant returns the correct tenant for a token request given the client's
+// resolveTenant returns the correct WithTenantID() argument for a token request given the client's
// configuration, or an error when that configuration doesn't allow the specified tenant
func (p *publicClient) resolveTenant(specified string) (string, error) {
- return resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants)
+ t, err := resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants)
+ if t == p.tenantID {
+ // callers pass this value to MSAL's WithTenantID(). There's no need to redundantly specify
+ // the client's default tenant and doing so is an error when that tenant is "organizations"
+ t = ""
+ }
+ return t, err
+}
+
+// these methods satisfy the MSAL ops.HTTPClient interface
+
+func (p *publicClient) CloseIdleConnections() {
+ // do nothing
+}
+
+func (p *publicClient) Do(r *http.Request) (*http.Response, error) {
+ return doForClient(p.azClient, r)
}
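
When Authenticate receives no scopes, the default is derived from the ARM audience of the configured or inferred cloud, with "/.default" appended; if no audience can be determined, the caller must pass a scope. A standalone sketch of that mapping; the helper is illustrative, while the cloud constants and the blank arm/runtime import are the ones the change relies on:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"

    	// as in public_client.go: this import populates ARM audiences in the well-known cloud configurations
    	_ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
    )

    // defaultARMScope mirrors the lookup above: match the authority host against
    // the well-known clouds and turn the ARM audience into a scope.
    func defaultARMScope(authorityHost string) (string, bool) {
    	if !strings.HasSuffix(authorityHost, "/") {
    		authorityHost += "/"
    	}
    	var audience string
    	switch authorityHost {
    	case cloud.AzureChina.ActiveDirectoryAuthorityHost:
    		audience = cloud.AzureChina.Services[cloud.ResourceManager].Audience
    	case cloud.AzureGovernment.ActiveDirectoryAuthorityHost:
    		audience = cloud.AzureGovernment.Services[cloud.ResourceManager].Audience
    	case cloud.AzurePublic.ActiveDirectoryAuthorityHost:
    		audience = cloud.AzurePublic.Services[cloud.ResourceManager].Audience
    	}
    	if audience == "" {
    		return "", false // caller must supply a scope explicitly
    	}
    	return audience + "/.default", true
    }

    func main() {
    	if scope, ok := defaultARMScope(cloud.AzurePublic.ActiveDirectoryAuthorityHost); ok {
    		fmt.Println("default scope:", scope)
    	}
    }
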
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
index f787ec0ce18..294ed81e951 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
@@ -11,6 +11,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
const credNameUserPassword = "UsernamePasswordCredential"
@@ -23,11 +24,19 @@ type UsernamePasswordCredentialOptions struct {
// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
// application is registered.
AdditionallyAllowedTenants []string
+
+ // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
+ // to enable the credential to use data from a previous authentication.
+ authenticationRecord authenticationRecord
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
+
+ // tokenCachePersistenceOptions enables persistent token caching when not nil.
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication,
@@ -45,11 +54,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st
options = &UsernamePasswordCredentialOptions{}
}
opts := publicClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- Password: password,
- Username: username,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ Password: password,
+ Record: options.authenticationRecord,
+ TokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
+ Username: username,
}
c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts)
if err != nil {
@@ -58,9 +69,22 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st
return &UsernamePasswordCredential{client: c}, err
}
-// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+// Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
+func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := c.client.Authenticate(ctx, opts)
+ return tk, err
+}
+
+// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return c.client.GetToken(ctx, opts)
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := c.client.GetToken(ctx, opts)
+ return tk, err
}
var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
index 65e74e31e3b..e8caeea71ef 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
@@ -10,6 +10,9 @@ const (
// UserAgent is the string to be used in the user agent string when making requests.
component = "azidentity"
+ // module is the fully qualified name of the module used in telemetry and distributed tracing.
+ module = "github.com/Azure/azure-sdk-for-go/sdk/" + component
+
// Version is the semantic version (see http://semver.org) of this module.
- version = "v1.4.0"
+ version = "v1.5.1"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
index 7e016324d22..3e43e788e93 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
@@ -15,6 +15,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
const credNameWorkloadIdentity = "WorkloadIdentityCredential"
@@ -41,7 +42,7 @@ type WorkloadIdentityCredentialOptions struct {
// ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID.
ClientID string
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata
+ // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
@@ -93,9 +94,13 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (
return &w, nil
}
-// GetToken requests an access token from Azure Active Directory. Azure SDK clients call this method automatically.
+// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically.
func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return w.cred.GetToken(ctx, opts)
+ var err error
+ ctx, endSpan := runtime.StartSpan(ctx, credNameWorkloadIdentity+"."+traceOpGetToken, w.cred.client.azClient.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ tk, err := w.cred.GetToken(ctx, opts)
+ return tk, err
}
// getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token.
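
Workload identity likewise only gains a span around GetToken. A usage sketch for a cluster that doesn't supply the projected token file and IDs via the usual AZURE_* environment variables; all values are placeholders:

    package main

    import (
    	"context"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    )

    func main() {
    	cred, err := azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{
    		ClientID:      "client-id", // placeholder; defaults to AZURE_CLIENT_ID
    		TenantID:      "tenant-id", // placeholder; defaults to AZURE_TENANT_ID
    		TokenFilePath: "/var/run/secrets/tokens/azure-identity-token", // placeholder path
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	_, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{
    		Scopes: []string{"https://management.azure.com/.default"}, // example scope
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
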
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go
index d4ed6ccc8ad..9948f604b30 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go
@@ -39,6 +39,11 @@ type PayloadOptions struct {
// Subsequent reads will access the cached value.
// Exported as runtime.Payload() WITHOUT the opts parameter.
func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) {
+ if resp.Body == nil {
+ // this shouldn't happen in real-world scenarios as a
+ // response with no body should set it to http.NoBody
+ return nil, nil
+ }
modifyBytes := func(b []byte) []byte { return b }
if opts != nil && opts.BytesModifier != nil {
modifyBytes = opts.BytesModifier
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md
index 8266fc04f4f..b10ffcc6a75 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md
@@ -1,5 +1,27 @@
# Release History
+## 1.1.0 (2024-02-13)
+
+### Other Changes
+* Upgraded to API service version `7.5`
+* Upgraded dependencies
+
+## 1.1.0-beta.2 (2023-11-08)
+
+### Features Added
+* Added the `HSMPlatform` field to the `KeyAttributes` struct
+
+### Other Changes
+* Upgraded service version to `7.5-preview.1`
+* Updated to latest version of `azcore`.
+* Fixed value of `otel.library.name` in traces.
+
+## 1.1.0-beta.1 (2023-10-11)
+
+### Features Added
+
+* Enabled spans for distributed tracing.
+
## 1.0.1 (2023-08-23)
### Other Changes
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json
index 1da72428681..00d485deb1d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/security/keyvault/azkeys",
- "Tag": "go/security/keyvault/azkeys_afbe036428"
+ "Tag": "go/security/keyvault/azkeys_2d421aec6c"
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md
index e2f6b7eb9e4..b067fd8013f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md
@@ -4,7 +4,7 @@
clear-output-folder: false
export-clients: true
go: true
-input-file: https://github.com/Azure/azure-rest-api-specs/blob/551275acb80e1f8b39036b79dfc35a8f63b601a7/specification/keyvault/data-plane/Microsoft.KeyVault/stable/7.4/keys.json
+input-file: https://github.com/Azure/azure-rest-api-specs/blob/7452e1cc7db72fbc6cd9539b390d8b8e5c2a1864/specification/keyvault/data-plane/Microsoft.KeyVault/stable/7.5/keys.json
license-header: MICROSOFT_MIT_NO_VERSION
module: github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys
openapi-type: "data-plane"
@@ -12,7 +12,8 @@ output-folder: ../azkeys
override-client-name: Client
security: "AADToken"
security-scopes: "https://vault.azure.net/.default"
-use: "@autorest/go@4.0.0-preview.54"
+use: "@autorest/go@4.0.0-preview.59"
+inject-spans: true
version: "^3.0.0"
directive:
@@ -95,6 +96,12 @@ directive:
- rename-model:
from: LifetimeActionsTrigger
to: LifetimeActionTrigger
+
+ # Rename HsmPlatform to HSMPlatform for consistency
+ - where-model: KeyAttributes
+ rename-property:
+ from: hsmPlatform
+ to: HSMPlatform
# Remove MaxResults parameter
- where: "$.paths..*"
@@ -220,6 +227,7 @@ directive:
- models.go
- options.go
- response_types.go
+ - options.go
where: $
transform: return $.replace(/Client(\w+)((?:Options|Response))/g, "$1$2");
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.security.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml
similarity index 74%
rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.security.yml
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml
index f5f7c91738a..86d11e976b7 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.security.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml
@@ -27,8 +27,14 @@ stages:
TimeoutInMinutes: 120
ServiceDirectory: 'security/keyvault/azkeys'
RunLiveTests: true
+ UsePipelineProxy: false
AdditionalMatrixConfigs:
- Name: keyvault_test_matrix_addons
Path: sdk/security/keyvault/azkeys/platform-matrix.json
Selection: sparse
GenerateVMJobs: true
+
+ # Due to the high cost of Managed HSMs, we only want to test using them weekly.
+ ${{ if not(contains(variables['Build.DefinitionName'], 'tests-weekly')) }}:
+ MatrixFilters:
+ - ArmTemplateParameters=^(?!.*enableHsm.*true)
+ - ArmTemplateParameters=^(?!.*enableHsm.*true)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go
index 799ab5ad45d..bb4afa61a2b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go
@@ -38,11 +38,13 @@ type Client struct {
// operation requires the key/backup permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key.
// - options - BackupKeyOptions contains the optional parameters for the Client.BackupKey method.
func (client *Client) BackupKey(ctx context.Context, name string, options *BackupKeyOptions) (BackupKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.BackupKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.backupKeyCreateRequest(ctx, name, options)
if err != nil {
return BackupKeyResponse{}, err
@@ -71,7 +73,7 @@ func (client *Client) backupKeyCreateRequest(ctx context.Context, name string, o
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -91,7 +93,7 @@ func (client *Client) backupKeyHandleResponse(resp *http.Response) (BackupKeyRes
// permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name for the new key. The system will generate the version name for the new key. The value you provide may be
// copied globally for the purpose of running the service. The value provided should not
// include personally identifiable or sensitive information.
@@ -99,6 +101,8 @@ func (client *Client) backupKeyHandleResponse(resp *http.Response) (BackupKeyRes
// - options - CreateKeyOptions contains the optional parameters for the Client.CreateKey method.
func (client *Client) CreateKey(ctx context.Context, name string, parameters CreateKeyParameters, options *CreateKeyOptions) (CreateKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.CreateKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.createKeyCreateRequest(ctx, name, parameters, options)
if err != nil {
return CreateKeyResponse{}, err
@@ -127,7 +131,7 @@ func (client *Client) createKeyCreateRequest(ctx context.Context, name string, p
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -155,13 +159,15 @@ func (client *Client) createKeyHandleResponse(resp *http.Response) (CreateKeyRes
// for more information.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key.
// - version - The version of the key.
// - parameters - The parameters for the decryption operation.
// - options - DecryptOptions contains the optional parameters for the Client.Decrypt method.
func (client *Client) Decrypt(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *DecryptOptions) (DecryptResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.Decrypt", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.decryptCreateRequest(ctx, name, version, parameters, options)
if err != nil {
return DecryptResponse{}, err
@@ -191,7 +197,7 @@ func (client *Client) decryptCreateRequest(ctx context.Context, name string, ver
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -214,11 +220,13 @@ func (client *Client) decryptHandleResponse(resp *http.Response) (DecryptRespons
// Sign/Verify, Wrap/Unwrap or Encrypt/Decrypt operations. This operation requires the keys/delete permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key to delete.
// - options - DeleteKeyOptions contains the optional parameters for the Client.DeleteKey method.
func (client *Client) DeleteKey(ctx context.Context, name string, options *DeleteKeyOptions) (DeleteKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.DeleteKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.deleteKeyCreateRequest(ctx, name, options)
if err != nil {
return DeleteKeyResponse{}, err
@@ -247,7 +255,7 @@ func (client *Client) deleteKeyCreateRequest(ctx context.Context, name string, o
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -271,13 +279,15 @@ func (client *Client) deleteKeyHandleResponse(resp *http.Response) (DeleteKeyRes
// public key material. This operation requires the keys/encrypt permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key.
// - version - The version of the key.
// - parameters - The parameters for the encryption operation.
// - options - EncryptOptions contains the optional parameters for the Client.Encrypt method.
func (client *Client) Encrypt(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *EncryptOptions) (EncryptResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.Encrypt", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.encryptCreateRequest(ctx, name, version, parameters, options)
if err != nil {
return EncryptResponse{}, err
@@ -307,7 +317,7 @@ func (client *Client) encryptCreateRequest(ctx context.Context, name string, ver
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -330,11 +340,13 @@ func (client *Client) encryptHandleResponse(resp *http.Response) (EncryptRespons
// operation requires the keys/get permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key.
// - options - GetDeletedKeyOptions contains the optional parameters for the Client.GetDeletedKey method.
func (client *Client) GetDeletedKey(ctx context.Context, name string, options *GetDeletedKeyOptions) (GetDeletedKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.GetDeletedKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.getDeletedKeyCreateRequest(ctx, name, options)
if err != nil {
return GetDeletedKeyResponse{}, err
@@ -363,7 +375,7 @@ func (client *Client) getDeletedKeyCreateRequest(ctx context.Context, name strin
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -382,13 +394,15 @@ func (client *Client) getDeletedKeyHandleResponse(resp *http.Response) (GetDelet
// is released in the response. This operation requires the keys/get permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key to get.
// - version - Adding the version parameter retrieves a specific version of a key. This URI fragment is optional. If not specified,
// the latest version of the key is returned.
// - options - GetKeyOptions contains the optional parameters for the Client.GetKey method.
func (client *Client) GetKey(ctx context.Context, name string, version string, options *GetKeyOptions) (GetKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.GetKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.getKeyCreateRequest(ctx, name, version, options)
if err != nil {
return GetKeyResponse{}, err
@@ -418,7 +432,7 @@ func (client *Client) getKeyCreateRequest(ctx context.Context, name string, vers
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -437,11 +451,13 @@ func (client *Client) getKeyHandleResponse(resp *http.Response) (GetKeyResponse,
// vault. This operation requires the keys/get permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key in a given key vault.
// - options - GetKeyRotationPolicyOptions contains the optional parameters for the Client.GetKeyRotationPolicy method.
func (client *Client) GetKeyRotationPolicy(ctx context.Context, name string, options *GetKeyRotationPolicyOptions) (GetKeyRotationPolicyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.GetKeyRotationPolicy", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.getKeyRotationPolicyCreateRequest(ctx, name, options)
if err != nil {
return GetKeyRotationPolicyResponse{}, err
@@ -470,7 +486,7 @@ func (client *Client) getKeyRotationPolicyCreateRequest(ctx context.Context, nam
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -488,11 +504,13 @@ func (client *Client) getKeyRotationPolicyHandleResponse(resp *http.Response) (G
// GetRandomBytes - Get the requested number of bytes containing random values from a managed HSM.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - parameters - The request object to get random bytes.
// - options - GetRandomBytesOptions contains the optional parameters for the Client.GetRandomBytes method.
func (client *Client) GetRandomBytes(ctx context.Context, parameters GetRandomBytesParameters, options *GetRandomBytesOptions) (GetRandomBytesResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.GetRandomBytes", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.getRandomBytesCreateRequest(ctx, parameters, options)
if err != nil {
return GetRandomBytesResponse{}, err
@@ -517,7 +535,7 @@ func (client *Client) getRandomBytesCreateRequest(ctx context.Context, parameter
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -540,13 +558,15 @@ func (client *Client) getRandomBytesHandleResponse(resp *http.Response) (GetRand
// keys/import permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - Name for the imported key. The value you provide may be copied globally for the purpose of running the service.
// The value provided should not include personally identifiable or sensitive information.
// - parameters - The parameters to import a key.
// - options - ImportKeyOptions contains the optional parameters for the Client.ImportKey method.
func (client *Client) ImportKey(ctx context.Context, name string, parameters ImportKeyParameters, options *ImportKeyOptions) (ImportKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.ImportKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.importKeyCreateRequest(ctx, name, parameters, options)
if err != nil {
return ImportKeyResponse{}, err
@@ -575,7 +595,7 @@ func (client *Client) importKeyCreateRequest(ctx context.Context, name string, p
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -599,7 +619,7 @@ func (client *Client) importKeyHandleResponse(resp *http.Response) (ImportKeyRes
// an error if invoked on a non soft-delete enabled vault. This operation
// requires the keys/list permission.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - options - ListDeletedKeyPropertiesOptions contains the optional parameters for the Client.NewListDeletedKeyPropertiesPager
// method.
func (client *Client) NewListDeletedKeyPropertiesPager(options *ListDeletedKeyPropertiesOptions) *runtime.Pager[ListDeletedKeyPropertiesResponse] {
@@ -608,25 +628,19 @@ func (client *Client) NewListDeletedKeyPropertiesPager(options *ListDeletedKeyPr
return page.NextLink != nil && len(*page.NextLink) > 0
},
Fetcher: func(ctx context.Context, page *ListDeletedKeyPropertiesResponse) (ListDeletedKeyPropertiesResponse, error) {
- var req *policy.Request
- var err error
- if page == nil {
- req, err = client.listDeletedKeyPropertiesCreateRequest(ctx, options)
- } else {
- req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
+ nextLink := ""
+ if page != nil {
+ nextLink = *page.NextLink
}
+ resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) {
+ return client.listDeletedKeyPropertiesCreateRequest(ctx, options)
+ }, nil)
if err != nil {
return ListDeletedKeyPropertiesResponse{}, err
}
- resp, err := client.internal.Pipeline().Do(req)
- if err != nil {
- return ListDeletedKeyPropertiesResponse{}, err
- }
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ListDeletedKeyPropertiesResponse{}, runtime.NewResponseError(resp)
- }
return client.listDeletedKeyPropertiesHandleResponse(resp)
},
+ Tracer: client.internal.Tracer(),
})
}
@@ -638,7 +652,7 @@ func (client *Client) listDeletedKeyPropertiesCreateRequest(ctx context.Context,
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -658,7 +672,7 @@ func (client *Client) listDeletedKeyPropertiesHandleResponse(resp *http.Response
// identifier, attributes, and tags are provided in the response. Individual versions of a key are not listed in the response.
// This operation requires the keys/list permission.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - options - ListKeyPropertiesOptions contains the optional parameters for the Client.NewListKeyPropertiesPager method.
func (client *Client) NewListKeyPropertiesPager(options *ListKeyPropertiesOptions) *runtime.Pager[ListKeyPropertiesResponse] {
return runtime.NewPager(runtime.PagingHandler[ListKeyPropertiesResponse]{
@@ -666,25 +680,19 @@ func (client *Client) NewListKeyPropertiesPager(options *ListKeyPropertiesOption
return page.NextLink != nil && len(*page.NextLink) > 0
},
Fetcher: func(ctx context.Context, page *ListKeyPropertiesResponse) (ListKeyPropertiesResponse, error) {
- var req *policy.Request
- var err error
- if page == nil {
- req, err = client.listKeyPropertiesCreateRequest(ctx, options)
- } else {
- req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
- }
- if err != nil {
- return ListKeyPropertiesResponse{}, err
+ nextLink := ""
+ if page != nil {
+ nextLink = *page.NextLink
}
- resp, err := client.internal.Pipeline().Do(req)
+ resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) {
+ return client.listKeyPropertiesCreateRequest(ctx, options)
+ }, nil)
if err != nil {
return ListKeyPropertiesResponse{}, err
}
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ListKeyPropertiesResponse{}, runtime.NewResponseError(resp)
- }
return client.listKeyPropertiesHandleResponse(resp)
},
+ Tracer: client.internal.Tracer(),
})
}
@@ -696,7 +704,7 @@ func (client *Client) listKeyPropertiesCreateRequest(ctx context.Context, option
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -714,7 +722,7 @@ func (client *Client) listKeyPropertiesHandleResponse(resp *http.Response) (List
// NewListKeyPropertiesVersionsPager - The full key identifier, attributes, and tags are provided in the response. This operation
// requires the keys/list permission.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key.
// - options - ListKeyPropertiesVersionsOptions contains the optional parameters for the Client.NewListKeyPropertiesVersionsPager
// method.
@@ -724,25 +732,19 @@ func (client *Client) NewListKeyPropertiesVersionsPager(name string, options *Li
return page.NextLink != nil && len(*page.NextLink) > 0
},
Fetcher: func(ctx context.Context, page *ListKeyPropertiesVersionsResponse) (ListKeyPropertiesVersionsResponse, error) {
- var req *policy.Request
- var err error
- if page == nil {
- req, err = client.listKeyPropertiesVersionsCreateRequest(ctx, name, options)
- } else {
- req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
- }
- if err != nil {
- return ListKeyPropertiesVersionsResponse{}, err
+ nextLink := ""
+ if page != nil {
+ nextLink = *page.NextLink
}
- resp, err := client.internal.Pipeline().Do(req)
+ resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) {
+ return client.listKeyPropertiesVersionsCreateRequest(ctx, name, options)
+ }, nil)
if err != nil {
return ListKeyPropertiesVersionsResponse{}, err
}
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ListKeyPropertiesVersionsResponse{}, runtime.NewResponseError(resp)
- }
return client.listKeyPropertiesVersionsHandleResponse(resp)
},
+ Tracer: client.internal.Tracer(),
})
}
@@ -758,7 +760,7 @@ func (client *Client) listKeyPropertiesVersionsCreateRequest(ctx context.Context
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -778,11 +780,13 @@ func (client *Client) listKeyPropertiesVersionsHandleResponse(resp *http.Respons
// This operation requires the keys/purge permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key
// - options - PurgeDeletedKeyOptions contains the optional parameters for the Client.PurgeDeletedKey method.
func (client *Client) PurgeDeletedKey(ctx context.Context, name string, options *PurgeDeletedKeyOptions) (PurgeDeletedKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.PurgeDeletedKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.purgeDeletedKeyCreateRequest(ctx, name, options)
if err != nil {
return PurgeDeletedKeyResponse{}, err
@@ -810,7 +814,7 @@ func (client *Client) purgeDeletedKeyCreateRequest(ctx context.Context, name str
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -822,11 +826,13 @@ func (client *Client) purgeDeletedKeyCreateRequest(ctx context.Context, name str
// requires the keys/recover permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the deleted key.
// - options - RecoverDeletedKeyOptions contains the optional parameters for the Client.RecoverDeletedKey method.
func (client *Client) RecoverDeletedKey(ctx context.Context, name string, options *RecoverDeletedKeyOptions) (RecoverDeletedKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.RecoverDeletedKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.recoverDeletedKeyCreateRequest(ctx, name, options)
if err != nil {
return RecoverDeletedKeyResponse{}, err
@@ -855,7 +861,7 @@ func (client *Client) recoverDeletedKeyCreateRequest(ctx context.Context, name s
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -874,13 +880,15 @@ func (client *Client) recoverDeletedKeyHandleResponse(resp *http.Response) (Reco
// requires the keys/release permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key to get.
// - version - Adding the version parameter retrieves a specific version of a key.
// - parameters - The parameters for the key release operation.
// - options - ReleaseOptions contains the optional parameters for the Client.Release method.
func (client *Client) Release(ctx context.Context, name string, version string, parameters ReleaseParameters, options *ReleaseOptions) (ReleaseResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.Release", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.releaseCreateRequest(ctx, name, version, parameters, options)
if err != nil {
return ReleaseResponse{}, err
@@ -910,7 +918,7 @@ func (client *Client) releaseCreateRequest(ctx context.Context, name string, ver
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -939,11 +947,13 @@ func (client *Client) releaseHandleResponse(resp *http.Response) (ReleaseRespons
// The user must have RESTORE permission in the target Key Vault. This operation requires the keys/restore permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - parameters - The parameters to restore the key.
// - options - RestoreKeyOptions contains the optional parameters for the Client.RestoreKey method.
func (client *Client) RestoreKey(ctx context.Context, parameters RestoreKeyParameters, options *RestoreKeyOptions) (RestoreKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.RestoreKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.restoreKeyCreateRequest(ctx, parameters, options)
if err != nil {
return RestoreKeyResponse{}, err
@@ -968,7 +978,7 @@ func (client *Client) restoreKeyCreateRequest(ctx context.Context, parameters Re
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -989,11 +999,13 @@ func (client *Client) restoreKeyHandleResponse(resp *http.Response) (RestoreKeyR
// RotateKey - The operation will rotate the key based on the key policy. It requires the keys/rotate permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of key to be rotated. The system will generate a new version in the specified key.
// - options - RotateKeyOptions contains the optional parameters for the Client.RotateKey method.
func (client *Client) RotateKey(ctx context.Context, name string, options *RotateKeyOptions) (RotateKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.RotateKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.rotateKeyCreateRequest(ctx, name, options)
if err != nil {
return RotateKeyResponse{}, err
@@ -1022,7 +1034,7 @@ func (client *Client) rotateKeyCreateRequest(ctx context.Context, name string, o
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
return req, nil
@@ -1041,13 +1053,15 @@ func (client *Client) rotateKeyHandleResponse(resp *http.Response) (RotateKeyRes
// uses the private portion of the key. This operation requires the keys/sign permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key.
// - version - The version of the key.
// - parameters - The parameters for the signing operation.
// - options - SignOptions contains the optional parameters for the Client.Sign method.
func (client *Client) Sign(ctx context.Context, name string, version string, parameters SignParameters, options *SignOptions) (SignResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.Sign", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.signCreateRequest(ctx, name, version, parameters, options)
if err != nil {
return SignResponse{}, err
@@ -1077,7 +1091,7 @@ func (client *Client) signCreateRequest(ctx context.Context, name string, versio
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -1101,13 +1115,15 @@ func (client *Client) signHandleResponse(resp *http.Response) (SignResponse, err
// permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key.
// - version - The version of the key.
// - parameters - The parameters for the key operation.
// - options - UnwrapKeyOptions contains the optional parameters for the Client.UnwrapKey method.
func (client *Client) UnwrapKey(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *UnwrapKeyOptions) (UnwrapKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.UnwrapKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.unwrapKeyCreateRequest(ctx, name, version, parameters, options)
if err != nil {
return UnwrapKeyResponse{}, err
@@ -1137,7 +1153,7 @@ func (client *Client) unwrapKeyCreateRequest(ctx context.Context, name string, v
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -1159,13 +1175,15 @@ func (client *Client) unwrapKeyHandleResponse(resp *http.Response) (UnwrapKeyRes
// of a key itself cannot be changed. This operation requires the keys/update permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of key to update.
// - version - The version of the key to update.
// - parameters - The parameters of the key to update.
// - options - UpdateKeyOptions contains the optional parameters for the Client.UpdateKey method.
func (client *Client) UpdateKey(ctx context.Context, name string, version string, parameters UpdateKeyParameters, options *UpdateKeyOptions) (UpdateKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.UpdateKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.updateKeyCreateRequest(ctx, name, version, parameters, options)
if err != nil {
return UpdateKeyResponse{}, err
@@ -1195,7 +1213,7 @@ func (client *Client) updateKeyCreateRequest(ctx context.Context, name string, v
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -1217,13 +1235,15 @@ func (client *Client) updateKeyHandleResponse(resp *http.Response) (UpdateKeyRes
// keys/update permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key in the given vault.
// - keyRotationPolicy - The policy for the key.
// - options - UpdateKeyRotationPolicyOptions contains the optional parameters for the Client.UpdateKeyRotationPolicy
// method.
func (client *Client) UpdateKeyRotationPolicy(ctx context.Context, name string, keyRotationPolicy KeyRotationPolicy, options *UpdateKeyRotationPolicyOptions) (UpdateKeyRotationPolicyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.UpdateKeyRotationPolicy", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.updateKeyRotationPolicyCreateRequest(ctx, name, keyRotationPolicy, options)
if err != nil {
return UpdateKeyRotationPolicyResponse{}, err
@@ -1252,7 +1272,7 @@ func (client *Client) updateKeyRotationPolicyCreateRequest(ctx context.Context,
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, keyRotationPolicy); err != nil {
@@ -1277,13 +1297,15 @@ func (client *Client) updateKeyRotationPolicyHandleResponse(resp *http.Response)
// the keys/verify permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key.
// - version - The version of the key.
// - parameters - The parameters for verify operations.
// - options - VerifyOptions contains the optional parameters for the Client.Verify method.
func (client *Client) Verify(ctx context.Context, name string, version string, parameters VerifyParameters, options *VerifyOptions) (VerifyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.Verify", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.verifyCreateRequest(ctx, name, version, parameters, options)
if err != nil {
return VerifyResponse{}, err
@@ -1313,7 +1335,7 @@ func (client *Client) verifyCreateRequest(ctx context.Context, name string, vers
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
@@ -1339,13 +1361,15 @@ func (client *Client) verifyHandleResponse(resp *http.Response) (VerifyResponse,
// permission.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 7.4
+// Generated from API version 7.5
// - name - The name of the key.
// - version - The version of the key.
// - parameters - The parameters for wrap operation.
// - options - WrapKeyOptions contains the optional parameters for the Client.WrapKey method.
func (client *Client) WrapKey(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *WrapKeyOptions) (WrapKeyResponse, error) {
var err error
+ ctx, endSpan := runtime.StartSpan(ctx, "Client.WrapKey", client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
req, err := client.wrapKeyCreateRequest(ctx, name, version, parameters, options)
if err != nil {
return WrapKeyResponse{}, err
@@ -1375,7 +1399,7 @@ func (client *Client) wrapKeyCreateRequest(ctx context.Context, name string, ver
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("api-version", "7.4")
+ reqQP.Set("api-version", "7.5")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json"}
if err := runtime.MarshalAsJSON(req, parameters); err != nil {
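For callers, the regenerated client wraps every operation in a distributed-tracing span (this pairs with the new `inject-spans: true` directive in autorest.md), and the three list pagers now delegate next-link handling to `runtime.FetcherForNextLink` with the client's `Tracer` attached. A minimal consumer-side sketch of the reworked pager; the vault URL is a placeholder, the credentials are assumed to be ambient, and the `Value` field name follows this module's generated models:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// https://<your-vault>.vault.azure.net is an assumed placeholder.
	client, err := azkeys.NewClient("https://<your-vault>.vault.azure.net", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Each NextPage call goes through runtime.FetcherForNextLink and is recorded
	// as a span when a tracing provider is configured on the client options.
	pager := client.NewListKeyPropertiesPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("page contains %d keys\n", len(page.Value))
	}
}
```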
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go
index 3d169e2d1fe..8f5c30c9f1c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go
@@ -192,15 +192,14 @@ func PossibleKeyEncryptionAlgorithmValues() []KeyEncryptionAlgorithm {
}
}
-// KeyRotationPolicyAction - The type of the action.
+// KeyRotationPolicyAction - The type of the action. The value should be compared case-insensitively.
type KeyRotationPolicyAction string
const (
- // KeyRotationPolicyActionNotify - Trigger event grid events. For preview, the notification time is not configurable and it
- // is default to 30 days before expiry.
- KeyRotationPolicyActionNotify KeyRotationPolicyAction = "notify"
+ // KeyRotationPolicyActionNotify - Trigger Event Grid events. Defaults to 30 days before expiry. Key Vault only.
+ KeyRotationPolicyActionNotify KeyRotationPolicyAction = "Notify"
// KeyRotationPolicyActionRotate - Rotate the key based on the key policy.
- KeyRotationPolicyActionRotate KeyRotationPolicyAction = "rotate"
+ KeyRotationPolicyActionRotate KeyRotationPolicyAction = "Rotate"
)
// PossibleKeyRotationPolicyActionValues returns the possible values for the KeyRotationPolicyAction const type.
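The canonical casing of the rotation-policy action values changes here from `notify`/`rotate` to `Notify`/`Rotate`, and the doc comment now says the value should be compared case-insensitively. A small sketch of a comparison that keeps working across both spellings; it relies only on the string-typed constant defined above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// isRotate reports whether an action type is the Rotate action, comparing
// case-insensitively as the updated doc comment recommends, so it matches the
// old "rotate" spelling as well as the new "Rotate" one.
func isRotate(t *azkeys.KeyRotationPolicyAction) bool {
	return t != nil && strings.EqualFold(string(*t), string(azkeys.KeyRotationPolicyActionRotate))
}

func main() {
	old := azkeys.KeyRotationPolicyAction("rotate") // value an older service response or cache might carry
	fmt.Println(isRotate(&old))                     // true
}
```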
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go
index edcd0d52cb2..3d67fa0e48d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go
@@ -37,7 +37,12 @@ func NewClient(vaultURL string, credential azcore.TokenCredential, options *Clie
DisableChallengeResourceVerification: options.DisableChallengeResourceVerification,
},
)
- azcoreClient, err := azcore.NewClient("azkeys.Client", version, runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}, &options.ClientOptions)
+ azcoreClient, err := azcore.NewClient(moduleName, version, runtime.PipelineOptions{
+ PerRetry: []policy.Policy{authPolicy},
+ Tracing: runtime.TracingOptions{
+ Namespace: "Microsoft.KeyVault",
+ },
+ }, &options.ClientOptions)
if err != nil {
return nil, err
}
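NewClient now passes the full module path to azcore (which azcore uses for telemetry) and declares the `Microsoft.KeyVault` tracing namespace, so the spans added throughout client.go are only emitted once the caller wires a tracing provider into the client options. A sketch of where that hook lives; `provider` stands in for a real `tracing.Provider` (for example one built with an OpenTelemetry adapter), which is an assumption here, and the zero value shown leaves tracing disabled:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// provider would normally come from an adapter around your tracer (assumed);
	// the zero value used here behaves like the default, i.e. tracing stays off.
	var provider tracing.Provider

	client, err := azkeys.NewClient("https://<your-vault>.vault.azure.net", cred, &azkeys.ClientOptions{
		ClientOptions: azcore.ClientOptions{TracingProvider: provider},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client // spans named "Client.<Operation>" are recorded under the Microsoft.KeyVault namespace
}
```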
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go
index b1d72c40cc5..6faae411ef9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go
@@ -195,6 +195,9 @@ type KeyAttributes struct {
// READ-ONLY; Creation time in UTC.
Created *time.Time
+ // READ-ONLY; The underlying HSM Platform.
+ HSMPlatform *string
+
// READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0.
RecoverableDays *int32
@@ -360,7 +363,7 @@ type LifetimeActionTrigger struct {
// LifetimeActionType - The action that will be executed.
type LifetimeActionType struct {
- // The type of the action.
+ // The type of the action. The value should be compared case-insensitively.
Type *KeyRotationPolicyAction
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go
index c83ccb2c5f2..d33da63e18e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go
@@ -399,6 +399,7 @@ func (k KeyAttributes) MarshalJSON() ([]byte, error) {
populate(objectMap, "enabled", k.Enabled)
populateTimeUnix(objectMap, "exp", k.Expires)
populate(objectMap, "exportable", k.Exportable)
+ populate(objectMap, "HSMPlatform", k.HSMPlatform)
populateTimeUnix(objectMap, "nbf", k.NotBefore)
populate(objectMap, "recoverableDays", k.RecoverableDays)
populate(objectMap, "recoveryLevel", k.RecoveryLevel)
@@ -427,6 +428,9 @@ func (k *KeyAttributes) UnmarshalJSON(data []byte) error {
case "exportable":
err = unpopulate(val, "Exportable", &k.Exportable)
delete(rawMsg, key)
+ case "HSMPlatform":
+ err = unpopulate(val, "HSMPlatform", &k.HSMPlatform)
+ delete(rawMsg, key)
case "nbf":
err = unpopulateTimeUnix(val, "NotBefore", &k.NotBefore)
delete(rawMsg, key)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go
index 891d21c4da1..53dc2ebd2d1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go
@@ -7,6 +7,6 @@
package azkeys
const (
- moduleName = "azkeys"
- version = "v1.0.1"
+ moduleName = "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
+ version = "v1.1.0"
)
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
index 6612feb4bf8..1841d146f5f 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -59,6 +59,8 @@ added, it doesn't exist in real life. As such I've put a PEM decoder into here.
// For details see https://aka.ms/msal-net-authenticationresult
type AuthResult = base.AuthResult
+type AuthenticationScheme = authority.AuthenticationScheme
+
type Account = shared.Account
// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file
@@ -454,6 +456,33 @@ func WithClaims(claims string) interface {
}
}
+// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens.
+func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface {
+ AcquireSilentOption
+ AcquireByCredentialOption
+ options.CallOption
+} {
+ return struct {
+ AcquireSilentOption
+ AcquireByCredentialOption
+ options.CallOption
+ }{
+ CallOption: options.NewCallOption(
+ func(a any) error {
+ switch t := a.(type) {
+ case *acquireTokenSilentOptions:
+ t.authnScheme = authnScheme
+ case *acquireTokenByCredentialOptions:
+ t.authnScheme = authnScheme
+ default:
+ return fmt.Errorf("unexpected options type %T", a)
+ }
+ return nil
+ },
+ ),
+ }
+}
+
// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New].
// This option is valid for any token acquisition method.
func WithTenantID(tenantID string) interface {
@@ -499,6 +528,7 @@ func WithTenantID(tenantID string) interface {
type acquireTokenSilentOptions struct {
account Account
claims, tenantID string
+ authnScheme AuthenticationScheme
}
// AcquireSilentOption is implemented by options for AcquireTokenSilent
@@ -549,6 +579,7 @@ func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts
Credential: cca.cred,
IsAppCache: o.account.IsZero(),
TenantID: o.tenantID,
+ AuthnScheme: o.authnScheme,
}
return cca.base.AcquireTokenSilent(ctx, silentParameters)
@@ -614,6 +645,7 @@ func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redir
// acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential
type acquireTokenByCredentialOptions struct {
claims, tenantID string
+ authnScheme AuthenticationScheme
}
// AcquireByCredentialOption is implemented by options for AcquireTokenByCredential
@@ -637,7 +669,9 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string,
authParams.Scopes = scopes
authParams.AuthorizationType = authority.ATClientCredentials
authParams.Claims = o.claims
-
+ if o.authnScheme != nil {
+ authParams.AuthnScheme = o.authnScheme
+ }
token, err := cca.base.Token.Credential(ctx, authParams, cca.cred)
if err != nil {
return AuthResult{}, err
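The new WithAuthenticationScheme option threads an `authority.AuthenticationScheme` (re-exported here as `confidential.AuthenticationScheme`) into both the silent and the by-credential flows; per the comment it is intended only for Azure Arc proof-of-possession tokens. A hedged sketch of the call site only; `myScheme` is an assumed implementation supplied by that integration, not something this module ships, and the scope value is just an example:

```go
package example

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

// acquireWithScheme shows where the new option plugs in. myScheme is assumed to
// satisfy confidential.AuthenticationScheme; everything else is the documented
// confidential-client API.
func acquireWithScheme(ctx context.Context, client confidential.Client, myScheme confidential.AuthenticationScheme) (confidential.AuthResult, error) {
	scopes := []string{"https://vault.azure.net/.default"}
	return client.AcquireTokenByCredential(ctx, scopes,
		confidential.WithAuthenticationScheme(myScheme))
}
```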
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
index 5f68384f68b..09a0d92f520 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -54,6 +54,7 @@ type AcquireTokenSilentParameters struct {
UserAssertion string
AuthorizationType authority.AuthorizeType
Claims string
+ AuthnScheme authority.AuthenticationScheme
}
// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow.
@@ -289,6 +290,9 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
authParams.AuthorizationType = silent.AuthorizationType
authParams.Claims = silent.Claims
authParams.UserAssertion = silent.UserAssertion
+ if silent.AuthnScheme != nil {
+ authParams.AuthnScheme = silent.AuthnScheme
+ }
m := b.pmanager
if authParams.AuthorizationType != authority.ATOnBehalfOf {
@@ -313,6 +317,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
if silent.Claims == "" {
ar, err = AuthResultFromStorage(storageTokenResponse)
if err == nil {
+ ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
return ar, err
}
}
@@ -417,6 +422,11 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au
if err == nil && b.cacheAccessor != nil {
err = b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key})
}
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
return ar, err
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
index 5d4c9f1d1f3..f9be90276da 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
@@ -12,6 +12,7 @@ import (
internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
)
@@ -75,12 +76,14 @@ type AccessToken struct {
ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"`
CachedAt internalTime.Unix `json:"cached_at,omitempty"`
UserAssertionHash string `json:"user_assertion_hash,omitempty"`
+ TokenType string `json:"token_type,omitempty"`
+ AuthnSchemeKeyID string `json:"keyid,omitempty"`
AdditionalFields map[string]interface{}
}
// NewAccessToken is the constructor for AccessToken.
-func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken {
+func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
return AccessToken{
HomeAccountID: homeID,
Environment: env,
@@ -92,6 +95,8 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
CachedAt: internalTime.Unix{T: cachedAt.UTC()},
ExpiresOn: internalTime.Unix{T: expiresOn.UTC()},
ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()},
+ TokenType: tokenType,
+ AuthnSchemeKeyID: authnSchemeKeyID,
}
}
@@ -101,6 +106,11 @@ func (a AccessToken) Key() string {
[]string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes},
shared.CacheKeySeparator,
)
+ // add token type to key for new access tokens types. skip for bearer token type to
+ // preserve fwd and back compat between a common cache and msal clients
+ if !strings.EqualFold(a.TokenType, authority.AccessTokenTypeBearer) {
+ key = strings.Join([]string{key, a.TokenType}, shared.CacheKeySeparator)
+ }
return strings.ToLower(key)
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
index 5e1cae0b8a3..c0931833064 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
@@ -41,6 +41,8 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.
realm := authParameters.AuthorityInfo.Tenant
clientID := authParameters.ClientID
scopes := authParameters.Scopes
+ authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
+ tokenType := authParameters.AuthnScheme.AccessTokenType()
// fetch metadata if instanceDiscovery is enabled
aliases := []string{authParameters.AuthorityInfo.Host}
@@ -57,7 +59,7 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.
// errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating
// TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token.
- accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest)
+ accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest, tokenType, authnSchemeKeyID)
if err == nil {
tr.AccessToken = accessToken
}
@@ -92,7 +94,7 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes
target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
userAssertionHash := authParameters.AssertionHash()
cachedAt := time.Now()
-
+ authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
var account shared.Account
if len(tokenResponse.RefreshToken) > 0 {
@@ -116,6 +118,8 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes
tokenResponse.ExtExpiresOn.T,
target,
tokenResponse.AccessToken,
+ tokenResponse.TokenType,
+ authnSchemeKeyID,
)
if authParameters.AuthorizationType == authority.ATOnBehalfOf {
accessToken.UserAssertionHash = userAssertionHash // get Hash method on this
@@ -215,7 +219,7 @@ func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo auth
return m.aadCache[authorityInfo.Host], nil
}
-func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) {
+func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey, tokenType, authnSchemeKeyID string) (AccessToken, error) {
m.contractMu.RLock()
defer m.contractMu.RUnlock()
if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok {
@@ -224,9 +228,11 @@ func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientI
// an issue, however if it does become a problem then we know where to look.
for _, at := range accessTokens {
if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash {
- if checkAlias(at.Environment, envAliases) {
- if isMatchingScopes(scopes, at.Scopes) {
- return at, nil
+ if at.TokenType == tokenType && at.AuthnSchemeKeyID == authnSchemeKeyID {
+ if checkAlias(at.Environment, envAliases) {
+ if isMatchingScopes(scopes, at.Scopes) {
+ return at, nil
+ }
}
}
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
index d3a39e005ca..2221e60c437 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
@@ -82,6 +82,39 @@ func isMatchingScopes(scopesOne []string, scopesTwo string) bool {
return scopeCounter == len(scopesOne)
}
+// needsUpgrade returns true if the given key follows the v1.0 schema i.e.,
+// it contains an uppercase character (v1.1+ keys are all lowercase)
+func needsUpgrade(key string) bool {
+ for _, r := range key {
+ if 'A' <= r && r <= 'Z' {
+ return true
+ }
+ }
+ return false
+}
+
+// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting
+// the v1.0 item. Callers must hold an exclusive lock on m.
+func upgrade[T any](m map[string]T, k string) T {
+ v1_1Key := strings.ToLower(k)
+ v, ok := m[k]
+ if !ok {
+ // another goroutine did the upgrade while this one was waiting for the write lock
+ return m[v1_1Key]
+ }
+ if v2, ok := m[v1_1Key]; ok {
+ // cache has an equivalent v1.1+ item, which we prefer because we know it was added
+ // by a newer version of the module and is therefore more likely to remain valid.
+ // The v1.0 item may have expired because only v1.0 or earlier would update it.
+ v = v2
+ } else {
+ // add an equivalent item according to the v1.1 schema
+ m[v1_1Key] = v
+ }
+ delete(m, k)
+ return v
+}
+
// Read reads a storage token from the cache if it exists.
func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
tr := TokenResponse{}
@@ -89,6 +122,8 @@ func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams)
realm := authParameters.AuthorityInfo.Tenant
clientID := authParameters.ClientID
scopes := authParameters.Scopes
+ authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
+ tokenType := authParameters.AuthnScheme.AccessTokenType()
// fetch metadata if instanceDiscovery is enabled
aliases := []string{authParameters.AuthorityInfo.Host}
@@ -100,7 +135,7 @@ func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams)
aliases = metadata.Aliases
}
- accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes)
+ accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes, tokenType, authnSchemeKeyID)
tr.AccessToken = accessToken
if homeAccountID == "" {
@@ -140,6 +175,7 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces
clientID := authParameters.ClientID
target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
cachedAt := time.Now()
+ authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
var account shared.Account
@@ -161,6 +197,8 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces
tokenResponse.ExtExpiresOn.T,
target,
tokenResponse.AccessToken,
+ tokenResponse.TokenType,
+ authnSchemeKeyID,
)
// Since we have a valid access token, cache it before moving on.
@@ -248,21 +286,27 @@ func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info)
return m.aadCache[authorityInfo.Host], nil
}
-func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken {
+func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken {
m.contractMu.RLock()
- defer m.contractMu.RUnlock()
// TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
// this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
// an issue, however if it does become a problem then we know where to look.
- for _, at := range m.contract.AccessTokens {
+ for k, at := range m.contract.AccessTokens {
if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID {
- if checkAlias(at.Environment, envAliases) {
- if isMatchingScopes(scopes, at.Scopes) {
+ if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) {
+ if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ at = upgrade(m.contract.AccessTokens, k)
+ }
return at
}
}
}
}
+ m.contractMu.RUnlock()
return AccessToken{}
}
@@ -303,15 +347,21 @@ func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID,
// If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
// https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
m.contractMu.RLock()
- defer m.contractMu.RUnlock()
for _, matcher := range matchers {
- for _, rt := range m.contract.RefreshTokens {
+ for k, rt := range m.contract.RefreshTokens {
if matcher(rt) {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ rt = upgrade(m.contract.RefreshTokens, k)
+ }
return rt, nil
}
}
}
+ m.contractMu.RUnlock()
return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
}
@@ -333,14 +383,20 @@ func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) erro
func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) {
m.contractMu.RLock()
- defer m.contractMu.RUnlock()
- for _, idt := range m.contract.IDTokens {
+ for k, idt := range m.contract.IDTokens {
if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID {
if checkAlias(idt.Environment, envAliases) {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ idt = upgrade(m.contract.IDTokens, k)
+ }
return idt, nil
}
}
}
+ m.contractMu.RUnlock()
return IDToken{}, fmt.Errorf("token not found")
}
@@ -379,7 +435,6 @@ func (m *Manager) Account(homeAccountID string) shared.Account {
func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) {
m.contractMu.RLock()
- defer m.contractMu.RUnlock()
// You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
// We only use a map because the storage contract shared between all language implementations says use a map.
@@ -387,11 +442,18 @@ func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm s
// a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup
// or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
// is really low (say 2). Each hash is more expensive than the entire iteration.
- for _, acc := range m.contract.Accounts {
+ for k, acc := range m.contract.Accounts {
if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ acc = upgrade(m.contract.Accounts, k)
+ }
return acc, nil
}
}
+ m.contractMu.RUnlock()
return shared.Account{}, fmt.Errorf("account not found")
}
@@ -405,13 +467,18 @@ func (m *Manager) writeAccount(account shared.Account) error {
func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
m.contractMu.RLock()
- defer m.contractMu.RUnlock()
-
- for _, app := range m.contract.AppMetaData {
+ for k, app := range m.contract.AppMetaData {
if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ app = upgrade(m.contract.AppMetaData, k)
+ }
return app, nil
}
}
+ m.contractMu.RUnlock()
return AppMetaData{}, fmt.Errorf("not found")
}
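
The read paths above share one locking pattern: scan under the read lock, and only when a hit still uses a v1.0-style key, release the read lock and take the write lock to migrate the entry in place. A minimal sketch of that pattern follows; the type names, the key suffix, and the stand-in `needsUpgrade`/`upgrade` helpers are illustrative, not the MSAL implementation.

```go
package cache

import (
	"strings"
	"sync"
)

type entry struct{ Secret string }

type store struct {
	mu    sync.RWMutex
	items map[string]entry
}

// needsUpgrade and upgrade stand in for the v1.0 -> v1.1 key migration; the
// real predicate and rewrite live in the vendored storage package.
func needsUpgrade(key string) bool { return !strings.HasSuffix(key, "-1.1") }

func upgrade(items map[string]entry, key string) entry {
	v, ok := items[key]
	if !ok {
		// another goroutine already migrated this key
		return items[key+"-1.1"]
	}
	delete(items, key)
	items[key+"-1.1"] = v
	return v
}

// read scans under the read lock and migrates a stale key under the write lock.
func (s *store) read(key string) (entry, bool) {
	s.mu.RLock()
	v, ok := s.items[key]
	s.mu.RUnlock()
	if !ok {
		return entry{}, false
	}
	if needsUpgrade(key) {
		s.mu.Lock()
		defer s.mu.Unlock()
		v = upgrade(s.items, key)
	}
	return v, true
}
```
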
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
index ebd86e2baf9..ef8d908a444 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
@@ -119,6 +119,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
return accesstokens.TokenResponse{}, err
}
return accesstokens.TokenResponse{
+ TokenType: authParams.AuthnScheme.AccessTokenType(),
AccessToken: tr.AccessToken,
ExpiresOn: internalTime.DurationTime{
T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second),
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
index 003d38648a6..a7b7b0742d8 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
@@ -380,6 +380,12 @@ func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.Auth
func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) {
resp := TokenResponse{}
+ if authParams.AuthnScheme != nil {
+ trParams := authParams.AuthnScheme.TokenRequestParams()
+ for k, v := range trParams {
+ qv.Set(k, v)
+ }
+ }
err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp)
if err != nil {
return resp, err
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
index 3dd61d5b5f0..3107b45c113 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
@@ -168,6 +168,7 @@ type TokenResponse struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
+ TokenType string `json:"token_type"`
FamilyID string `json:"foci"`
IDToken IDToken `json:"id_token"`
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
index 7b2ccb4f5d2..9d60734f88e 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
@@ -29,6 +29,7 @@ const (
defaultAPIVersion = "2021-10-01"
imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion
autoDetectRegion = "TryAutoDetect"
+ AccessTokenTypeBearer = "Bearer"
)
// These are various hosts that host AAD Instance discovery endpoints.
@@ -138,6 +139,39 @@ const (
ADFS = "ADFS"
)
+// AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens.
+type AuthenticationScheme interface {
+ // Extra parameters that are added to the request to the /token endpoint.
+ TokenRequestParams() map[string]string
+ // Key ID of the public / private key pair used by the encryption algorithm, if any.
+ // Tokens obtained by authentication schemes that use this are bound to the KeyId, i.e.
+ // if a different kid is presented, the access token cannot be used.
+ KeyID() string
+ // Creates the access token that goes into an Authorization HTTP header.
+ FormatAccessToken(accessToken string) (string, error)
+ // Expected to match the token_type parameter returned by ESTS. Used to disambiguate
+ // between ATs of different types (e.g. Bearer and PoP) when loading from cache etc.
+ AccessTokenType() string
+}
+
+// BearerAuthenticationScheme is the default authentication scheme, implementing AuthenticationScheme for "Bearer" tokens.
+type BearerAuthenticationScheme struct{}
+
+var bearerAuthnScheme BearerAuthenticationScheme
+
+func (ba *BearerAuthenticationScheme) TokenRequestParams() map[string]string {
+ return nil
+}
+func (ba *BearerAuthenticationScheme) KeyID() string {
+ return ""
+}
+func (ba *BearerAuthenticationScheme) FormatAccessToken(accessToken string) (string, error) {
+ return accessToken, nil
+}
+func (ba *BearerAuthenticationScheme) AccessTokenType() string {
+ return AccessTokenTypeBearer
+}
+
// AuthParams represents the parameters used for authorization for token acquisition.
type AuthParams struct {
AuthorityInfo Info
@@ -180,6 +214,8 @@ type AuthParams struct {
LoginHint string
// DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page
DomainHint string
+ // AuthnScheme is an optional scheme for formatting access tokens
+ AuthnScheme AuthenticationScheme
}
// NewAuthParams creates an authorization parameters object.
@@ -188,6 +224,7 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams {
ClientID: clientID,
AuthorityInfo: authorityInfo,
CorrelationID: uuid.New().String(),
+ AuthnScheme: &bearerAuthnScheme,
}
}
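
As a hedged illustration of the extension point added above (this type is not part of the library; the "pop" token type and the extra request parameters are placeholders), a custom scheme satisfying AuthenticationScheme could look like:

```go
package popexample

// popScheme is a hypothetical proof-of-possession style scheme; the library
// only requires that the four AuthenticationScheme methods are implemented.
type popScheme struct {
	kid string // key ID the issued token should be bound to
}

func (p *popScheme) TokenRequestParams() map[string]string {
	// extra form values merged into the /token request (see doTokenResp above)
	return map[string]string{"token_type": "pop", "req_cnf": p.kid}
}

func (p *popScheme) KeyID() string { return p.kid }

func (p *popScheme) FormatAccessToken(accessToken string) (string, error) {
	// wrap the raw token before it goes into the Authorization header
	return "PoP " + accessToken, nil
}

func (p *popScheme) AccessTokenType() string { return "pop" }
```
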
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
index 2ac2d09e4fa..eb16b405c4b 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
@@ -5,4 +5,4 @@
package version
// Version is the version of this client package that is communicated to the server.
-const Version = "1.1.1"
+const Version = "1.2.0"
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
index 88b217dedda..e346ff3dffd 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
@@ -47,9 +47,11 @@ import (
// For details see https://aka.ms/msal-net-authenticationresult
type AuthResult = base.AuthResult
+type AuthenticationScheme = authority.AuthenticationScheme
+
type Account = shared.Account
-var errNoAccount = errors.New("no account was specified with public.WithAccount(), or the specified account is invalid")
+var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid")
// clientOptions configures the Client's behavior.
type clientOptions struct {
@@ -211,6 +213,33 @@ func WithClaims(claims string) interface {
}
}
+// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens.
+func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface {
+ AcquireSilentOption
+ AcquireInteractiveOption
+ options.CallOption
+} {
+ return struct {
+ AcquireSilentOption
+ AcquireInteractiveOption
+ options.CallOption
+ }{
+ CallOption: options.NewCallOption(
+ func(a any) error {
+ switch t := a.(type) {
+ case *acquireTokenSilentOptions:
+ t.authnScheme = authnScheme
+ case *interactiveAuthOptions:
+ t.authnScheme = authnScheme
+ default:
+ return fmt.Errorf("unexpected options type %T", a)
+ }
+ return nil
+ },
+ ),
+ }
+}
+
// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority].
// This option is valid for any token acquisition method.
func WithTenantID(tenantID string) interface {
@@ -260,6 +289,7 @@ func WithTenantID(tenantID string) interface {
type acquireTokenSilentOptions struct {
account Account
claims, tenantID string
+ authnScheme AuthenticationScheme
}
// AcquireSilentOption is implemented by options for AcquireTokenSilent
@@ -310,6 +340,7 @@ func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts
RequestType: accesstokens.ATPublic,
IsAppCache: false,
TenantID: o.tenantID,
+ AuthnScheme: o.authnScheme,
}
return pca.base.AcquireTokenSilent(ctx, silentParameters)
@@ -482,6 +513,7 @@ func (pca Client) RemoveAccount(ctx context.Context, account Account) error {
type interactiveAuthOptions struct {
claims, domainHint, loginHint, redirectURI, tenantID string
openURL func(url string) error
+ authnScheme AuthenticationScheme
}
// AcquireInteractiveOption is implemented by options for AcquireTokenInteractive
@@ -628,6 +660,9 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string,
authParams.DomainHint = o.domainHint
authParams.State = uuid.New().String()
authParams.Prompt = "select_account"
+ if o.authnScheme != nil {
+ authParams.AuthnScheme = o.authnScheme
+ }
res, err := pca.browserLogin(ctx, redirectURL, authParams, o.openURL)
if err != nil {
return AuthResult{}, err
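
A hedged usage sketch for the new public-client option; the client ID and scope are placeholders, and the scheme is any implementation of the interface exported above.

```go
package msalexample

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

func acquire(ctx context.Context, scheme public.AuthenticationScheme) error {
	client, err := public.New("your-client-id")
	if err != nil {
		return err
	}
	// The scheme is copied onto AuthParams.AuthnScheme, so it shapes both the
	// token request and the subsequent cache lookups shown earlier in the diff.
	_, err = client.AcquireTokenInteractive(ctx, []string{"api://example/.default"},
		public.WithAuthenticationScheme(scheme))
	return err
}
```
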
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
index b361c13867c..2264200c169 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
@@ -150,6 +150,18 @@ type Config struct {
// BaseEndpoint is an intermediary transfer location to a service specific
// BaseEndpoint on a service's Options.
BaseEndpoint *string
+
+ // DisableRequestCompression toggles if an operation request could be
+ // compressed or not. Will be set to false by default. This variable is sourced from
+ // environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute
+ // disable_request_compression
+ DisableRequestCompression bool
+
+ // RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be
+ // compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively.
+ // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or
+ // the shared config profile attribute request_min_compression_size_bytes
+ RequestMinCompressSizeBytes int64
}
// NewConfig returns a new Config pointer that can be chained with builder
@@ -158,8 +170,7 @@ func NewConfig() *Config {
return &Config{}
}
-// Copy will return a shallow copy of the Config object. If any additional
-// configurations are provided they will be merged into the new config returned.
+// Copy will return a shallow copy of the Config object.
func (c Config) Copy() Config {
cp := c
return cp
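
For reference, a small sketch (not SDK code) of the two new fields set directly on aws.Config instead of being sourced from AWS_DISABLE_REQUEST_COMPRESSION / AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES; the region is a placeholder.

```go
package awsexample

import "github.com/aws/aws-sdk-go-v2/aws"

func newConfig() aws.Config {
	return aws.Config{
		Region:                      "us-east-1",
		DisableRequestCompression:   false, // request compression stays enabled
		RequestMinCompressSizeBytes: 10240, // inclusive threshold in bytes (documented default)
	}
}
```
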
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
index c760c48e3dc..12a33149970 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.23.5"
+const goModuleVersion = "1.25.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
index 9bd0dfb1508..6d5f0079c2f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
@@ -139,16 +139,16 @@ func AddRecordResponseTiming(stack *middleware.Stack) error {
// raw response within the response metadata.
type rawResponseKey struct{}
-// addRawResponse middleware adds raw response on to the metadata
-type addRawResponse struct{}
+// AddRawResponse middleware adds raw response on to the metadata
+type AddRawResponse struct{}
// ID the identifier for the ClientRequestID
-func (m *addRawResponse) ID() string {
+func (m *AddRawResponse) ID() string {
return "AddRawResponseToMetadata"
}
// HandleDeserialize adds raw response on the middleware metadata
-func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+func (m AddRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
@@ -159,7 +159,7 @@ func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.Des
// AddRawResponseToMetadata adds middleware to the middleware stack that
// store raw response on to the metadata.
func AddRawResponseToMetadata(stack *middleware.Stack) error {
- return stack.Deserialize.Add(&addRawResponse{}, middleware.Before)
+ return stack.Deserialize.Add(&AddRawResponse{}, middleware.Before)
}
// GetRawResponse returns raw response set on metadata
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
index 7ce48c611cd..e7d268c3da5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
@@ -11,18 +11,22 @@ import (
func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
// add error wrapper middleware before operation deserializers so that it can wrap the error response
// returned by operation deserializers
- return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before)
+ return stack.Deserialize.Insert(&RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
}
-type requestIDRetriever struct {
+// RequestIDRetriever middleware captures the AWS service request ID from the
+// raw response.
+type RequestIDRetriever struct {
}
// ID returns the middleware identifier
-func (m *requestIDRetriever) ID() string {
+func (m *RequestIDRetriever) ID() string {
return "RequestIDRetriever"
}
-func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+// HandleDeserialize pulls the AWS request ID from the response, storing it in
+// operation metadata.
+func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
index af3447ddc98..db7cda42d92 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
@@ -64,12 +64,12 @@ var validChars = map[rune]bool{
'-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true,
}
-// requestUserAgent is a build middleware that set the User-Agent for the request.
-type requestUserAgent struct {
+// RequestUserAgent is a build middleware that sets the User-Agent for the request.
+type RequestUserAgent struct {
sdkAgent, userAgent *smithyhttp.UserAgentBuilder
}
-// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the
+// NewRequestUserAgent returns a new RequestUserAgent which will set the User-Agent and X-Amz-User-Agent for the
// request.
//
// User-Agent example:
@@ -79,12 +79,12 @@ type requestUserAgent struct {
// X-Amz-User-Agent example:
//
// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15
-func newRequestUserAgent() *requestUserAgent {
+func NewRequestUserAgent() *RequestUserAgent {
userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder()
addProductName(userAgent)
addProductName(sdkAgent)
- r := &requestUserAgent{
+ r := &RequestUserAgent{
sdkAgent: sdkAgent,
userAgent: userAgent,
}
@@ -94,7 +94,7 @@ func newRequestUserAgent() *requestUserAgent {
return r
}
-func addSDKMetadata(r *requestUserAgent) {
+func addSDKMetadata(r *RequestUserAgent) {
r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName())
r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion)
r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS)
@@ -162,18 +162,18 @@ func AddRequestUserAgentMiddleware(stack *middleware.Stack) error {
return err
}
-func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) {
- id := (*requestUserAgent)(nil).ID()
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*RequestUserAgent, error) {
+ id := (*RequestUserAgent)(nil).ID()
bm, ok := stack.Build.Get(id)
if !ok {
- bm = newRequestUserAgent()
+ bm = NewRequestUserAgent()
err := stack.Build.Add(bm, middleware.After)
if err != nil {
return nil, err
}
}
- requestUserAgent, ok := bm.(*requestUserAgent)
+ requestUserAgent, ok := bm.(*RequestUserAgent)
if !ok {
return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id)
}
@@ -182,34 +182,34 @@ func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error
}
// AddUserAgentKey adds the component identified by name to the User-Agent string.
-func (u *requestUserAgent) AddUserAgentKey(key string) {
+func (u *RequestUserAgent) AddUserAgentKey(key string) {
u.userAgent.AddKey(strings.Map(rules, key))
}
// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
-func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) {
+func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) {
u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value))
}
-// AddUserAgentKey adds the component identified by name to the User-Agent string.
-func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
+// AddSDKAgentKey adds the component identified by name to the User-Agent string.
+func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
// TODO: should target sdkAgent
u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key))
}
-// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
-func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
+// AddSDKAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
// TODO: should target sdkAgent
u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value))
}
// ID the name of the middleware.
-func (u *requestUserAgent) ID() string {
+func (u *RequestUserAgent) ID() string {
return "UserAgent"
}
// HandleBuild adds or appends the constructed user agent to the request.
-func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
out middleware.BuildOutput, metadata middleware.Metadata, err error,
) {
switch req := in.Request.(type) {
@@ -224,12 +224,12 @@ func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildI
return next.HandleBuild(ctx, in)
}
-func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
+func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
const userAgent = "User-Agent"
updateHTTPHeader(request, userAgent, u.userAgent.Build())
}
-func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
+func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
const sdkAgent = "X-Amz-User-Agent"
updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build())
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
index 722ca34c6a0..dc703d482d2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
@@ -328,10 +328,12 @@ func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresO
middleware.LogAttempts = options.LogRetryAttempts
})
- if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil {
+ // index retry to before signing, if signing exists
+ if err := stack.Finalize.Insert(attempt, "Signing", smithymiddle.Before); err != nil {
return err
}
- if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil {
+
+ if err := stack.Finalize.Insert(&MetricsHeader{}, attempt.ID(), smithymiddle.After); err != nil {
return err
}
return nil
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
index f39a369ad84..febeb0482db 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
@@ -85,12 +85,12 @@ func (m *dynamicPayloadSigningMiddleware) HandleFinalize(
}
if req.IsHTTPS() {
- return (&unsignedPayload{}).HandleFinalize(ctx, in, next)
+ return (&UnsignedPayload{}).HandleFinalize(ctx, in, next)
}
- return (&computePayloadSHA256{}).HandleFinalize(ctx, in, next)
+ return (&ComputePayloadSHA256{}).HandleFinalize(ctx, in, next)
}
-// unsignedPayload sets the SigV4 request payload hash to unsigned.
+// UnsignedPayload sets the SigV4 request payload hash to unsigned.
//
// Will not set the Unsigned Payload magic SHA value, if a SHA has already been
// stored in the context. (e.g. application pre-computed SHA256 before making
@@ -98,21 +98,21 @@ func (m *dynamicPayloadSigningMiddleware) HandleFinalize(
//
// This middleware does not check the X-Amz-Content-Sha256 header, if that
// header is serialized a middleware must translate it into the context.
-type unsignedPayload struct{}
+type UnsignedPayload struct{}
// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation
// middleware stack
func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error {
- return stack.Finalize.Insert(&unsignedPayload{}, "ResolveEndpointV2", middleware.After)
+ return stack.Finalize.Insert(&UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
}
// ID returns the unsignedPayload identifier
-func (m *unsignedPayload) ID() string {
+func (m *UnsignedPayload) ID() string {
return computePayloadHashMiddlewareID
}
// HandleFinalize sets the payload hash magic value to the unsigned sentinel.
-func (m *unsignedPayload) HandleFinalize(
+func (m *UnsignedPayload) HandleFinalize(
ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
@@ -123,7 +123,7 @@ func (m *unsignedPayload) HandleFinalize(
return next.HandleFinalize(ctx, in)
}
-// computePayloadSHA256 computes SHA256 payload hash to sign.
+// ComputePayloadSHA256 computes SHA256 payload hash to sign.
//
// Will not set the Unsigned Payload magic SHA value, if a SHA has already been
// stored in the context. (e.g. application pre-computed SHA256 before making
@@ -131,12 +131,12 @@ func (m *unsignedPayload) HandleFinalize(
//
// This middleware does not check the X-Amz-Content-Sha256 header, if that
// header is serialized a middleware must translate it into the context.
-type computePayloadSHA256 struct{}
+type ComputePayloadSHA256 struct{}
// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the
// operation middleware stack
func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error {
- return stack.Finalize.Insert(&computePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+ return stack.Finalize.Insert(&ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
}
// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the
@@ -147,13 +147,13 @@ func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error {
}
// ID is the middleware name
-func (m *computePayloadSHA256) ID() string {
+func (m *ComputePayloadSHA256) ID() string {
return computePayloadHashMiddlewareID
}
// HandleFinalize computes the payload hash for the request, storing it to the
// context. This is a no-op if a caller has previously set that value.
-func (m *computePayloadSHA256) HandleFinalize(
+func (m *ComputePayloadSHA256) HandleFinalize(
ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
@@ -196,35 +196,35 @@ func (m *computePayloadSHA256) HandleFinalize(
// Use this to disable computing the Payload SHA256 checksum and instead use
// UNSIGNED-PAYLOAD for the SHA256 value.
func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error {
- _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &unsignedPayload{})
+ _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &UnsignedPayload{})
return err
}
-// contentSHA256Header sets the X-Amz-Content-Sha256 header value to
+// ContentSHA256Header sets the X-Amz-Content-Sha256 header value to
// the Payload hash stored in the context.
-type contentSHA256Header struct{}
+type ContentSHA256Header struct{}
// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the
// operation middleware stack
func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
- return stack.Finalize.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After)
+ return stack.Finalize.Insert(&ContentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After)
}
// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware
// from the operation middleware stack
func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
- _, err := stack.Finalize.Remove((*contentSHA256Header)(nil).ID())
+ _, err := stack.Finalize.Remove((*ContentSHA256Header)(nil).ID())
return err
}
// ID returns the ContentSHA256HeaderMiddleware identifier
-func (m *contentSHA256Header) ID() string {
+func (m *ContentSHA256Header) ID() string {
return "SigV4ContentSHA256Header"
}
// HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash
// stored in the context.
-func (m *contentSHA256Header) HandleFinalize(
+func (m *ContentSHA256Header) HandleFinalize(
ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
@@ -360,18 +360,21 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl
return next.HandleFinalize(ctx, in)
}
-type streamingEventsPayload struct{}
+// StreamingEventsPayload signs input event stream messages.
+type StreamingEventsPayload struct{}
// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack.
func AddStreamingEventsPayload(stack *middleware.Stack) error {
- return stack.Finalize.Add(&streamingEventsPayload{}, middleware.Before)
+ return stack.Finalize.Add(&StreamingEventsPayload{}, middleware.Before)
}
-func (s *streamingEventsPayload) ID() string {
+// ID identifies the middleware.
+func (s *StreamingEventsPayload) ID() string {
return computePayloadHashMiddlewareID
}
-func (s *streamingEventsPayload) HandleFinalize(
+// HandleFinalize marks the input stream to be signed with SigV4.
+func (s *StreamingEventsPayload) HandleFinalize(
ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
index 8fd14cecd23..a1ad20fe341 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
@@ -12,18 +12,20 @@ import (
func AddResponseErrorMiddleware(stack *middleware.Stack) error {
// add error wrapper middleware before request id retriever middleware so that it can wrap the error response
// returned by operation deserializers
- return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+ return stack.Deserialize.Insert(&ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
}
-type responseErrorWrapper struct {
+// ResponseErrorWrapper wraps operation errors with ResponseError.
+type ResponseErrorWrapper struct {
}
// ID returns the middleware identifier
-func (m *responseErrorWrapper) ID() string {
+func (m *ResponseErrorWrapper) ID() string {
return "ResponseErrorWrapper"
}
-func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+// HandleDeserialize wraps the stack error with smithyhttp.ResponseError.
+func (m *ResponseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index dbe24d3e125..38b390aa243 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,3 +1,59 @@
+# v1.27.4 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.3 (2024-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2024-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2024-01-22)
+
+* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2024-01-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.4 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2023-12-08)
+
+* **Bug Fix**: Correct loading of [services *] sections into shared config.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2023-12-07)
+
+* **Feature**: Support modeled request compression. The only algorithm supported at this time is `gzip`.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.12 (2023-12-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.25.11 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
index dfe62973221..50582d89d54 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
@@ -70,10 +70,16 @@ var defaultAWSConfigResolvers = []awsConfigResolver{
// httpBearerAuth authentication scheme.
resolveBearerAuthToken,
- // Sets the sdk app ID if present in shared config profile
+ // Sets the sdk app ID if present in env var or shared config profile
resolveAppID,
resolveBaseEndpoint,
+
+ // Sets the DisableRequestCompression if present in env var or shared config profile
+ resolveDisableRequestCompression,
+
+ // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile
+ resolveRequestMinCompressSizeBytes,
}
// A Config represents a generic configuration value or set of values. This type
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
index fb0615aeb42..88550198cce 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
@@ -12,6 +12,7 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
)
// CredentialsSourceName provides a name of the provider when config is
@@ -75,6 +76,9 @@ const (
awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS"
awsEndpointURL = "AWS_ENDPOINT_URL"
+ awsDisableRequestCompression = "AWS_DISABLE_REQUEST_COMPRESSION"
+ awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES"
+
awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH"
)
@@ -271,6 +275,15 @@ type EnvConfig struct {
// corresponding endpoint resolution field.
BaseEndpoint string
+ // determine if request compression is allowed, default to false
+ // retrieved from env var AWS_DISABLE_REQUEST_COMPRESSION
+ DisableRequestCompression *bool
+
+ // inclusive threshold request body size to trigger compression,
+ // default to 10240 and must be within 0 and 10485760 bytes inclusive
+ // retrieved from env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES
+ RequestMinCompressSizeBytes *int64
+
// Whether S3Express auth is disabled.
//
// This will NOT prevent requests from being made to S3Express buckets, it
@@ -319,6 +332,13 @@ func NewEnvConfig() (EnvConfig, error) {
cfg.AppID = os.Getenv(awsSdkAppID)
+ if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompression}); err != nil {
+ return cfg, err
+ }
+ if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytes}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil {
+ return cfg, err
+ }
+
if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil {
return cfg, err
}
@@ -383,6 +403,20 @@ func (c EnvConfig) getAppID(context.Context) (string, bool, error) {
return c.AppID, len(c.AppID) > 0, nil
}
+func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) {
+ if c.DisableRequestCompression == nil {
+ return false, false, nil
+ }
+ return *c.DisableRequestCompression, true, nil
+}
+
+func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) {
+ if c.RequestMinCompressSizeBytes == nil {
+ return 0, false, nil
+ }
+ return *c.RequestMinCompressSizeBytes, true, nil
+}
+
// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified,
// and not 0.
func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
@@ -639,6 +673,30 @@ func setBoolPtrFromEnvVal(dst **bool, keys []string) error {
return nil
}
+func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value)
+ } else if v < 0 || v > max {
+ return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v)
+ }
+ if *dst == nil {
+ *dst = new(int64)
+ }
+
+ **dst = v
+ break
+ }
+
+ return nil
+}
+
func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error {
for _, k := range keys {
value := os.Getenv(k)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index 50962ab387f..1e60413c13d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.25.11"
+const goModuleVersion = "1.27.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
index 25c05a5536b..06596c1b7c8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
@@ -207,6 +207,12 @@ type LoadOptions struct {
// The sdk app ID retrieved from env var or shared config to be added to request user agent header
AppID string
+ // Specifies whether an operation request could be compressed
+ DisableRequestCompression *bool
+
+ // The inclusive min bytes of a request body that could be compressed
+ RequestMinCompressSizeBytes *int64
+
// Whether S3 Express auth is disabled.
S3DisableExpressAuth *bool
}
@@ -256,6 +262,22 @@ func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) {
return o.AppID, len(o.AppID) > 0, nil
}
+// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions
+func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
+ if o.DisableRequestCompression == nil {
+ return false, false, nil
+ }
+ return *o.DisableRequestCompression, true, nil
+}
+
+// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions
+func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
+ if o.RequestMinCompressSizeBytes == nil {
+ return 0, false, nil
+ }
+ return *o.RequestMinCompressSizeBytes, true, nil
+}
+
// WithRegion is a helper function to construct functional options
// that sets Region on config's LoadOptions. Setting the region to
// an empty string, will result in the region value being ignored.
@@ -277,6 +299,30 @@ func WithAppID(ID string) LoadOptionsFunc {
}
}
+// WithDisableRequestCompression is a helper function to construct functional options
+// that sets DisableRequestCompression on config's LoadOptions.
+func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if DisableRequestCompression == nil {
+ return nil
+ }
+ o.DisableRequestCompression = DisableRequestCompression
+ return nil
+ }
+}
+
+// WithRequestMinCompressSizeBytes is a helper function to construct functional options
+// that sets RequestMinCompressSizeBytes on config's LoadOptions.
+func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if RequestMinCompressSizeBytes == nil {
+ return nil
+ }
+ o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes
+ return nil
+ }
+}
+
// getDefaultRegion returns DefaultRegion from config's LoadOptions
func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
if len(o.DefaultRegion) == 0 {
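
The same settings can also be supplied programmatically through the new functional options; a hedged example follows (the 20 KiB threshold is arbitrary).

```go
package awsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func load(ctx context.Context) (aws.Config, error) {
	minBytes := int64(20480) // only compress request bodies of 20 KiB or more
	return config.LoadDefaultConfig(ctx,
		config.WithDisableRequestCompression(aws.Bool(false)),
		config.WithRequestMinCompressSizeBytes(&minBytes),
	)
}
```
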
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
index d5235846011..13745fc98fd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
@@ -191,6 +191,40 @@ func getAppID(ctx context.Context, configs configs) (value string, found bool, e
return
}
+// disableRequestCompressionProvider provides access to the DisableRequestCompression
+type disableRequestCompressionProvider interface {
+ getDisableRequestCompression(context.Context) (bool, bool, error)
+}
+
+func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(disableRequestCompressionProvider); ok {
+ value, found, err = p.getDisableRequestCompression(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes
+type requestMinCompressSizeBytesProvider interface {
+ getRequestMinCompressSizeBytes(context.Context) (int64, bool, error)
+}
+
+func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok {
+ value, found, err = p.getRequestMinCompressSizeBytes(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
// ec2IMDSRegionProvider provides access to the ec2 imds region
// configuration value
type ec2IMDSRegionProvider interface {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
index b3b2c93cdc4..fde2e3980e0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
@@ -139,6 +139,33 @@ func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error {
return nil
}
+// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's
+// SharedConfig or EnvConfig
+func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error {
+ disable, _, err := getDisableRequestCompression(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ cfg.DisableRequestCompression = disable
+ return nil
+}
+
+// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's
+// SharedConfig or EnvConfig
+func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error {
+ minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs)
+ if err != nil {
+ return err
+ }
+ // must set a default min size 10240 if not configured
+ if !found {
+ minBytes = 10240
+ }
+ cfg.RequestMinCompressSizeBytes = minBytes
+ return nil
+}
+
// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default
// region if region had not been resolved from other sources.
func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
index 823eafe12b8..c546cb7d0f5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
@@ -17,6 +17,7 @@ import (
"github.com/aws/aws-sdk-go-v2/internal/ini"
"github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
"github.com/aws/smithy-go/logging"
+ smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
)
const (
@@ -30,7 +31,7 @@ const (
// Prefix for services section. It is referenced in profile via the services
// parameter to configure clients for service-specific parameters.
- servicesPrefix = `services`
+ servicesPrefix = `services `
// string equivalent for boolean
endpointDiscoveryDisabled = `false`
@@ -108,6 +109,11 @@ const (
endpointURL = "endpoint_url"
+ servicesSectionKey = "services"
+
+ disableRequestCompression = "disable_request_compression"
+ requestMinCompressionSizeBytes = "request_min_compression_size_bytes"
+
s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth"
)
@@ -316,8 +322,18 @@ type SharedConfig struct {
// corresponding endpoint resolution field.
BaseEndpoint string
- // Value to contain services section content.
- Services Services
+ // Services section config.
+ ServicesSectionName string
+ Services Services
+
+ // determine if request compression is allowed, default to false
+ // retrieved from config file's profile field disable_request_compression
+ DisableRequestCompression *bool
+
+ // inclusive threshold request body size to trigger compression,
+ // default to 10240 and must be within 0 and 10485760 bytes inclusive
+ // retrieved from config file's profile field request_min_compression_size_bytes
+ RequestMinCompressSizeBytes *int64
// Whether S3Express auth is disabled.
//
@@ -994,14 +1010,11 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile
c.SSOSession = &ssoSession
}
- for _, sectionName := range sections.List() {
- if strings.HasPrefix(sectionName, servicesPrefix) {
- section, ok := sections.GetSection(sectionName)
- if ok {
- var svcs Services
- svcs.setFromIniSection(section)
- c.Services = svcs
- }
+ if len(c.ServicesSectionName) > 0 {
+ if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok {
+ var svcs Services
+ svcs.setFromIniSection(section)
+ c.Services = svcs
}
}
@@ -1104,6 +1117,13 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
updateString(&c.BaseEndpoint, section, endpointURL)
+ if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err)
+ }
+ if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err)
+ }
+
// Shared Credentials
creds := aws.Credentials{
AccessKeyID: section.String(accessKeyIDKey),
@@ -1116,9 +1136,61 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
c.Credentials = creds
}
+ updateString(&c.ServicesSectionName, section, servicesSectionKey)
+
+ return nil
+}
+
+func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v, ok := sec.Int(key)
+ if !ok {
+ return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key))
+ }
+ if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes {
+ return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v)
+ }
+ *bytes = new(int64)
+ **bytes = v
return nil
}
+func updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v := sec.String(key)
+ switch {
+ case v == "true":
+ *disable = new(bool)
+ **disable = true
+ case v == "false":
+ *disable = new(bool)
+ **disable = false
+ default:
+ return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v)
+ }
+ return nil
+}
+
+func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
+ if c.RequestMinCompressSizeBytes == nil {
+ return 0, false, nil
+ }
+ return *c.RequestMinCompressSizeBytes, true, nil
+}
+
+func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
+ if c.DisableRequestCompression == nil {
+ return false, false, nil
+ }
+ return *c.DisableRequestCompression, true, nil
+}
+
func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
if !section.Has(key) {
return nil
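
An illustrative shared config profile exercising the keys parsed above; the profile name, threshold, and endpoint are placeholders. Note the behavior change in this hunk: `services` in a profile now names the specific `[services <name>]` section to load, rather than every services-prefixed section being read.

```ini
[profile dev]
disable_request_compression = false
request_min_compression_size_bytes = 12288
services = local

[services local]
s3 =
  endpoint_url = http://localhost:4566
```
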
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index 529591f9c02..0f571bce793 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,3 +1,52 @@
+# v1.17.4 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2024-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2024-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.16 (2024-01-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.15 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.14 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.12 (2023-12-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.11 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2023-12-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.16.9 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
new file mode 100644
index 00000000000..c3f5dadcec9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
index df0e7575c44..9a869f89547 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
@@ -101,6 +101,7 @@ func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput
stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+ addProtocolFinalizerMiddlewares(stack, options, "GetCredentials")
retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
new file mode 100644
index 00000000000..748ee67244e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
index ddb28a66d1c..f2820d20eac 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
@@ -146,3 +146,19 @@ func stof(code int) smithy.ErrorFault {
}
return smithy.FaultClient
}
+
+func addProtocolFinalizerMiddlewares(stack *smithymiddleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, smithymiddleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", smithymiddleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %w", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", smithymiddleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %w", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", smithymiddleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
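
The `addProtocolFinalizerMiddlewares` helper added above pins a fixed ordering in the Finalize step — ResolveAuthScheme, then GetIdentity, then ResolveEndpointV2, then Signing — with each `Insert` anchored on the ID of the middleware registered just before it. Below is a minimal standalone sketch of that wiring against `github.com/aws/smithy-go/middleware`; it is not part of the vendored code, and the `named` type and `must` helper are illustrative only.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// named is a no-op Finalize middleware whose only job is to occupy a
// well-known slot (ID) in the stack, mirroring the placeholders above.
type named struct{ id string }

func (m *named) ID() string { return m.id }

func (m *named) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
	middleware.FinalizeOutput, middleware.Metadata, error,
) {
	return next.HandleFinalize(ctx, in)
}

func must(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	stack := middleware.NewStack("GetCredentials", smithyhttp.NewStackRequest)

	// Same insertion pattern as addProtocolFinalizerMiddlewares: each step
	// is anchored on the ID of the previously registered middleware.
	must(stack.Finalize.Add(&named{"ResolveAuthScheme"}, middleware.Before))
	must(stack.Finalize.Insert(&named{"GetIdentity"}, "ResolveAuthScheme", middleware.After))
	must(stack.Finalize.Insert(&named{"ResolveEndpointV2"}, "GetIdentity", middleware.After))
	must(stack.Finalize.Insert(&named{"Signing"}, "ResolveEndpointV2", middleware.After))

	// Later additions can anchor on these IDs; for example, the retry
	// middleware is inserted before "Signing" elsewhere in this change set.
	for _, id := range []string{"ResolveAuthScheme", "GetIdentity", "ResolveEndpointV2", "Signing"} {
		_, ok := stack.Finalize.Get(id)
		fmt.Printf("%-18s registered: %v\n", id, ok)
	}
}
```
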
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index 16aacea8d22..ca8e4d24e8c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.16.9"
+const goModuleVersion = "1.17.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index 3381934716f..2ba9a260bae 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,3 +1,24 @@
+# v1.15.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.11 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.10 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.14.9 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
index 9e3bdb0e66e..af58b6bb102 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
@@ -56,6 +56,7 @@ type GetDynamicDataOutput struct {
func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetDynamicData",
buildGetDynamicDataPath,
buildGetDynamicDataOutput)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
index 24845dccd6d..5111cc90cac 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
@@ -53,6 +53,7 @@ type GetIAMInfoOutput struct {
func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetIAMInfo",
buildGetIAMInfoPath,
buildGetIAMInfoOutput,
)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
index a87758ed302..dc8c09edf03 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
@@ -54,6 +54,7 @@ type GetInstanceIdentityDocumentOutput struct {
func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetInstanceIdentityDocument",
buildGetInstanceIdentityDocumentPath,
buildGetInstanceIdentityDocumentOutput,
)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
index cb0ce4c0004..869bfc9feb9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
@@ -56,6 +56,7 @@ type GetMetadataOutput struct {
func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetMetadata",
buildGetMetadataPath,
buildGetMetadataOutput)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
index 7b9b48912af..8c0572bb5c8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
@@ -45,6 +45,7 @@ type GetRegionOutput struct {
func addGetRegionMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetRegion",
buildGetInstanceIdentityDocumentPath,
buildGetRegionOutput,
)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
index 841f802c1a3..1f9ee97a5b7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
@@ -49,6 +49,7 @@ func addGetTokenMiddleware(stack *middleware.Stack, options Options) error {
err := addRequestMiddleware(stack,
options,
"PUT",
+ "GetToken",
buildGetTokenPath,
buildGetTokenOutput)
if err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
index 88aa61e9ad9..8903697244a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
@@ -45,6 +45,7 @@ type GetUserDataOutput struct {
func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetUserData",
buildGetUserDataPath,
buildGetUserDataOutput)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
new file mode 100644
index 00000000000..ad283cf825f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
@@ -0,0 +1,48 @@
+package imds
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
new file mode 100644
index 00000000000..d7540da3481
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
@@ -0,0 +1,20 @@
+package imds
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
index 9979479a91a..8010ded7b62 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.14.9"
+const goModuleVersion = "1.15.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
index c8abd64916c..fc948c27d89 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
@@ -17,10 +17,11 @@ import (
func addAPIRequestMiddleware(stack *middleware.Stack,
options Options,
+ operation string,
getPath func(interface{}) (string, error),
getOutput func(*smithyhttp.Response) (interface{}, error),
) (err error) {
- err = addRequestMiddleware(stack, options, "GET", getPath, getOutput)
+ err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput)
if err != nil {
return err
}
@@ -44,6 +45,7 @@ func addAPIRequestMiddleware(stack *middleware.Stack,
func addRequestMiddleware(stack *middleware.Stack,
options Options,
method string,
+ operation string,
getPath func(interface{}) (string, error),
getOutput func(*smithyhttp.Response) (interface{}, error),
) (err error) {
@@ -101,6 +103,10 @@ func addRequestMiddleware(stack *middleware.Stack,
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, operation); err != nil {
+ return fmt.Errorf("add protocol finalizers: %w", err)
+ }
+
// Retry support
return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{
Retryer: options.Retryer,
@@ -283,3 +289,19 @@ func appendURIPath(base, add string) string {
}
return reqPath
}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %w", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %w", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
index 8ad41847f8b..b62d57cb504 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -1,3 +1,24 @@
+# v1.3.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.2.8 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
index 44c5156ac2a..a99e10d8a96 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.2.8"
+const goModuleVersion = "1.3.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
index 7ea49d4ea40..849beffd7da 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
@@ -11,7 +11,7 @@ func GetPartition(region string) *PartitionConfig {
var partitions = []Partition{
{
ID: "aws",
- RegionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$",
+ RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$",
DefaultConfig: PartitionConfig{
Name: "aws",
DnsSuffix: "amazonaws.com",
@@ -90,6 +90,13 @@ var partitions = []Partition{
SupportsFIPS: nil,
SupportsDualStack: nil,
},
+ "ap-southeast-4": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
"aws-global": {
Name: nil,
DnsSuffix: nil,
@@ -160,6 +167,13 @@ var partitions = []Partition{
SupportsFIPS: nil,
SupportsDualStack: nil,
},
+ "il-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
"me-central-1": {
Name: nil,
DnsSuffix: nil,
@@ -340,4 +354,28 @@ var partitions = []Partition{
},
},
},
+ {
+ ID: "aws-iso-e",
+ RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso-e",
+ DnsSuffix: "cloud.adc-e.uk",
+ DualStackDnsSuffix: "cloud.adc-e.uk",
+ SupportsFIPS: true,
+ SupportsDualStack: false,
+ },
+ Regions: map[string]RegionOverrides{},
+ },
+ {
+ ID: "aws-iso-f",
+ RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso-f",
+ DnsSuffix: "csp.hci.ic.gov",
+ DualStackDnsSuffix: "csp.hci.ic.gov",
+ SupportsFIPS: true,
+ SupportsDualStack: false,
+ },
+ Regions: map[string]RegionOverrides{},
+ },
}
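
The partition metadata above widens the `aws` partition's `RegionRegex` to accept the new `il-` prefix and adds the `aws-iso-e` and `aws-iso-f` partitions. A quick sanity check of the updated pattern (an illustrative aside, not part of the vendored code; the region names are just sample inputs):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Updated "aws" partition pattern from partitions.go, with "il" added
	// to the accepted prefix set.
	awsPartition := regexp.MustCompile(`^(us|eu|ap|sa|ca|me|af|il)\-\w+\-\d+$`)

	for _, region := range []string{"il-central-1", "ap-southeast-4", "us-gov-west-1"} {
		// us-gov-west-1 should not match: GovCloud regions belong to the
		// separate "aws-us-gov" partition, which has its own RegionRegex.
		fmt.Printf("%-16s in aws partition: %v\n", region, awsPartition.MatchString(region))
	}
}
```
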
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
index ab107ca5511..f376f6908aa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
@@ -50,6 +50,9 @@
"ca-central-1" : {
"description" : "Canada (Central)"
},
+ "ca-west-1" : {
+ "description" : "Canada West (Calgary)"
+ },
"eu-central-1" : {
"description" : "Europe (Frankfurt)"
},
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
index 172f6228638..b95cd39f422 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -1,3 +1,24 @@
+# v2.6.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v2.5.8 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
index 3f2d7085307..833b9115753 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
@@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "2.5.8"
+const goModuleVersion = "2.6.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
index 149f7f716d4..c0e54faff28 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
@@ -1,3 +1,15 @@
+# v1.8.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# v1.7.3 (2024-01-22)
+
+* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons.
+
+# v1.7.2 (2023-12-08)
+
+* **Bug Fix**: Correct loading of [services *] sections into shared config.
+
# v1.7.1 (2023-11-16)
* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
index 5080ebe68f5..6e0b906c346 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
@@ -3,4 +3,4 @@
package ini
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.7.1"
+const goModuleVersion = "1.8.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
index 661588c2276..ed77d083517 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
@@ -67,12 +67,8 @@ func unquote(s string) string {
// applies various legacy conversions to property values:
// - remove wrapping single/doublequotes
-// - expand escaped quote and newline sequences
func legacyStrconv(s string) string {
s = unquote(s)
- s = strings.ReplaceAll(s, `\"`, `"`)
- s = strings.ReplaceAll(s, `\'`, `'`)
- s = strings.ReplaceAll(s, `\n`, "\n")
return s
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go
index ade75bf34e4..e3706b3c31b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go
@@ -54,18 +54,7 @@ func (v Value) String() string {
// MapValue returns a map value for sub properties
func (v Value) MapValue() map[string]string {
- newlineParts := strings.Split(string(v.str), "\n")
- mp := make(map[string]string)
- for _, part := range newlineParts {
- operandParts := strings.Split(part, "=")
- if len(operandParts) < 2 {
- continue
- }
- key := strings.TrimSpace(operandParts[0])
- val := strings.TrimSpace(operandParts[1])
- mp[key] = val
- }
- return mp
+ return v.mp
}
// IntValue returns an integer value
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
index c2a206b03b9..cac6f926eb8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
@@ -1,3 +1,15 @@
+# v1.11.1 (2024-02-21)
+
+* No change notes available for this release.
+
+# v1.11.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# v1.10.4 (2023-12-07)
+
+* No change notes available for this release.
+
# v1.10.3 (2023-11-30)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
index 4a245da2962..c5ae0f8735d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
@@ -3,4 +3,4 @@
package acceptencoding
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.10.3"
+const goModuleVersion = "1.11.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
index 5e433862c3b..38b0de2840e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -1,3 +1,24 @@
+# v1.11.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.10.8 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
index 27415898668..0af263c5ef6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.10.8"
+const goModuleVersion = "1.11.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md
index 55efccf9bf4..37a38b51787 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md
@@ -1,3 +1,57 @@
+# v1.29.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.28.3 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.28.1 (2024-02-15)
+
+* **Bug Fix**: Correct failure to determine the error type in awsJson services that could occur when errors were modeled with a non-string `code` field.
+
+# v1.28.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.9 (2024-01-05)
+
+* **Documentation**: Documentation updates for AWS Key Management Service (KMS).
+
+# v1.27.8 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.7 (2023-12-20)
+
+* No change notes available for this release.
+
+# v1.27.6 (2023-12-15)
+
+* **Documentation**: Documentation updates for AWS Key Management Service
+
+# v1.27.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.27.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
# v1.27.2 (2023-12-01)
* **Bug Fix**: Correct wrapping of errors in authentication workflow.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go
index d756de129f3..febc4135e3f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go
@@ -43,6 +43,8 @@ func New(options Options, optFns ...func(*Options)) *Client {
setResolvedDefaultsMode(&options)
+ resolveRetryer(&options)
+
resolveHTTPClient(&options)
resolveHTTPSignerV4(&options)
@@ -55,10 +57,12 @@ func New(options Options, optFns ...func(*Options)) *Client {
fn(&options)
}
- resolveRetryer(&options)
+ finalizeRetryMaxAttempts(&options)
ignoreAnonymousAuth(&options)
+ wrapWithAnonymousAuth(&options)
+
resolveAuthSchemes(&options)
client := &Client{
@@ -86,7 +90,7 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
fn(&options)
}
- finalizeRetryMaxAttemptOptions(&options, *c)
+ finalizeOperationRetryMaxAttempts(&options, *c)
finalizeClientEndpointResolverOptions(&options)
@@ -334,7 +338,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
-func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
@@ -350,17 +362,37 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
}
func addClientUserAgent(stack *middleware.Stack, options Options) error {
- if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "kms", goModuleVersion)(stack); err != nil {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
return err
}
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "kms", goModuleVersion)
if len(options.AppID) > 0 {
- return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack)
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
}
return nil
}
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
+
type HTTPSignerV4 interface {
SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
}
@@ -379,12 +411,48 @@ func newDefaultV4Signer(o Options) *v4.Signer {
})
}
-func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
- mo := retry.AddRetryMiddlewaresOptions{
- Retryer: o.Retryer,
- LogRetryAttempts: o.ClientLogMode.IsRetries(),
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ })
+ if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
}
- return retry.AddRetryMiddlewares(stack, mo)
+ return nil
}
// resolves dual-stack endpoint configuration
@@ -417,12 +485,18 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
return nil
}
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
- return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
- return awshttp.AddResponseErrorMiddleware(stack)
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
}
func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
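
Beyond the middleware renames, this file splits retry-attempt finalization into a construction-time step (`finalizeRetryMaxAttempts`, applied once in `New`) and a per-operation step (`finalizeOperationRetryMaxAttempts`, which only re-wraps the retryer when an operation-level override differs from the client's value). A hedged usage sketch of how those options are set from caller code follows; it assumes credentials and a region are available from the default config sources and is not part of the vendored diff.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Client-level cap, applied once by finalizeRetryMaxAttempts during New().
	client := kms.NewFromConfig(cfg, func(o *kms.Options) {
		o.RetryMaxAttempts = 5
	})

	// Per-call override; finalizeOperationRetryMaxAttempts re-wraps the
	// retryer here because 2 differs from the client-level value of 5.
	out, err := client.ListKeys(ctx, &kms.ListKeysInput{}, func(o *kms.Options) {
		o.RetryMaxAttempts = 2
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("listed %d keys", len(out.Keys))
}
```
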
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go
index 3cc5e2acfd2..4cabfbcbbe6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -21,7 +20,10 @@ import (
// the Key Management Service Developer Guide. Cross-account use: No. You cannot
// perform this operation on a KMS key in a different Amazon Web Services account.
// Required permissions: kms:CancelKeyDeletion (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: ScheduleKeyDeletion
+// (key policy) Related operations: ScheduleKeyDeletion Eventual consistency: The
+// KMS API follows an eventual consistency model. For more information, see KMS
+// eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) CancelKeyDeletion(ctx context.Context, params *CancelKeyDeletionInput, optFns ...func(*Options)) (*CancelKeyDeletionOutput, error) {
if params == nil {
params = &CancelKeyDeletionInput{}
@@ -86,25 +88,25 @@ func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stac
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -125,7 +127,7 @@ func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stac
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelKeyDeletion(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go
index 5112a2dc299..92021a497b1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -67,6 +66,10 @@ import (
// - DescribeCustomKeyStores
// - DisconnectCustomKeyStore
// - UpdateCustomKeyStore
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ConnectCustomKeyStore(ctx context.Context, params *ConnectCustomKeyStoreInput, optFns ...func(*Options)) (*ConnectCustomKeyStoreOutput, error) {
if params == nil {
params = &ConnectCustomKeyStoreInput{}
@@ -122,25 +125,25 @@ func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware.
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -161,7 +164,7 @@ func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware.
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opConnectCustomKeyStore(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go
index a5d2a5872af..1ef799fb995 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -42,6 +41,10 @@ import (
// - DeleteAlias
// - ListAliases
// - UpdateAlias
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) CreateAlias(ctx context.Context, params *CreateAliasInput, optFns ...func(*Options)) (*CreateAliasOutput, error) {
if params == nil {
params = &CreateAliasInput{}
@@ -117,25 +120,25 @@ func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -156,7 +159,7 @@ func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateAlias(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go
index 5c9b2e28e54..00679206d4c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -69,6 +68,10 @@ import (
// - DescribeCustomKeyStores
// - DisconnectCustomKeyStore
// - UpdateCustomKeyStore
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) CreateCustomKeyStore(ctx context.Context, params *CreateCustomKeyStoreInput, optFns ...func(*Options)) (*CreateCustomKeyStoreOutput, error) {
if params == nil {
params = &CreateCustomKeyStoreInput{}
@@ -177,7 +180,7 @@ type CreateCustomKeyStoreInput struct {
// in the Amazon Web Services account and Region.
// - An external key store with PUBLIC_ENDPOINT connectivity cannot use the same
// XksProxyUriEndpoint value as an external key store with VPC_ENDPOINT_SERVICE
- // connectivity in the same Amazon Web Services Region.
+ // connectivity in this Amazon Web Services Region.
// - Each external key store with VPC_ENDPOINT_SERVICE connectivity must have its
// own private DNS name. The XksProxyUriEndpoint value for external key stores
// with VPC_ENDPOINT_SERVICE connectivity (private DNS name) must be unique in
@@ -242,25 +245,25 @@ func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.S
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -281,7 +284,7 @@ func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.S
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCustomKeyStore(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go
index e1c94b06b04..691a17b45aa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -47,6 +46,10 @@ import (
// - ListRetirableGrants
// - RetireGrant
// - RevokeGrant
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) CreateGrant(ctx context.Context, params *CreateGrantInput, optFns ...func(*Options)) (*CreateGrantOutput, error) {
if params == nil {
params = &CreateGrantInput{}
@@ -204,25 +207,25 @@ func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -243,7 +246,7 @@ func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGrant(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go
index a022d25b763..85bf140354b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -131,6 +130,10 @@ import (
// - DescribeKey
// - ListKeys
// - ScheduleKeyDeletion
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) CreateKey(ctx context.Context, params *CreateKeyInput, optFns ...func(*Options)) (*CreateKeyOutput, error) {
if params == nil {
params = &CreateKeyInput{}
@@ -154,7 +157,8 @@ type CreateKeyInput struct {
// information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
// in the Key Management Service Developer Guide. Use this parameter only when you
// intend to prevent the principal that is making the request from making a
- // subsequent PutKeyPolicy request on the KMS key.
+ // subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+ // request on the KMS key.
BypassPolicyLockoutSafetyCheck bool
// Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
@@ -374,25 +378,25 @@ func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, optio
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -413,7 +417,7 @@ func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, optio
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateKey(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go
index eb7f88e7b43..202bce68cf8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -55,21 +54,25 @@ import (
// or any Amazon Web Services SDK. Use the Recipient parameter to provide the
// attestation document for the enclave. Instead of the plaintext data, the
// response includes the plaintext data encrypted with the public key from the
-// attestation document ( CiphertextForRecipient ).For information about the
+// attestation document ( CiphertextForRecipient ). For information about the
// interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon
// Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
-// in the Key Management Service Developer Guide.. The KMS key that you use for
-// this operation must be in a compatible key state. For details, see Key states
-// of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide. Cross-account use: Yes. If you
-// use the KeyId parameter to identify a KMS key in a different Amazon Web
-// Services account, specify the key ARN or the alias ARN of the KMS key. Required
+// in the Key Management Service Developer Guide. The KMS key that you use for this
+// operation must be in a compatible key state. For details, see Key states of KMS
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in
+// the Key Management Service Developer Guide. Cross-account use: Yes. If you use
+// the KeyId parameter to identify a KMS key in a different Amazon Web Services
+// account, specify the key ARN or the alias ARN of the KMS key. Required
// permissions: kms:Decrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
// (key policy) Related operations:
// - Encrypt
// - GenerateDataKey
// - GenerateDataKeyPair
// - ReEncrypt
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) Decrypt(ctx context.Context, params *DecryptInput, optFns ...func(*Options)) (*DecryptOutput, error) {
if params == nil {
params = &DecryptInput{}
@@ -146,7 +149,7 @@ type DecryptInput struct {
// get the alias name and alias ARN, use ListAliases .
KeyId *string
- // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc)
+ // A signed attestation document (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-concepts.html#term-attestdoc)
// from an Amazon Web Services Nitro enclave and the encryption algorithm to use
// with the enclave's public key. The only valid encryption algorithm is
// RSAES_OAEP_SHA_256 . This parameter only supports attestation documents for
@@ -216,25 +219,25 @@ func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -255,7 +258,7 @@ func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecrypt(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go
index aa989d54c44..5f611dd9dcb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -32,6 +31,10 @@ import (
// - CreateAlias
// - ListAliases
// - UpdateAlias
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) DeleteAlias(ctx context.Context, params *DeleteAliasInput, optFns ...func(*Options)) (*DeleteAliasOutput, error) {
if params == nil {
params = &DeleteAliasInput{}
@@ -87,25 +90,25 @@ func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -126,7 +129,7 @@ func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteAlias(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
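The doc comments added throughout these files note that KMS follows an eventual consistency model. A minimal caller-side sketch of one way to cope with that, assuming a freshly created key may briefly be invisible to a follow-up read; the package name, helper name, retry bounds, and key ID are illustrative, and only the public kms client API is used:

package main

import (
	"context"
	"errors"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// describeWithRetry retries DescribeKey briefly when the key is not yet
// visible, which can happen right after CreateKey because KMS reads are
// eventually consistent.
func describeWithRetry(ctx context.Context, client *kms.Client, keyID string) (*kms.DescribeKeyOutput, error) {
	var notFound *types.NotFoundException
	for attempt := 0; ; attempt++ {
		out, err := client.DescribeKey(ctx, &kms.DescribeKeyInput{KeyId: aws.String(keyID)})
		if err == nil {
			return out, nil
		}
		// Retry only the transient "not visible yet" case, and give up after a few tries.
		if !errors.As(err, &notFound) || attempt >= 5 {
			return nil, err
		}
		time.Sleep(time.Duration(attempt+1) * 200 * time.Millisecond)
	}
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		panic(err)
	}
	// Hypothetical key ID for illustration only.
	_, _ = describeWithRetry(ctx, kms.NewFromConfig(cfg), "1234abcd-12ab-34cd-56ef-1234567890ab")
}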
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go
index 0dc8fae3dd0..a6711c6f1c8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -46,6 +45,10 @@ import (
// - DescribeCustomKeyStores
// - DisconnectCustomKeyStore
// - UpdateCustomKeyStore
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) DeleteCustomKeyStore(ctx context.Context, params *DeleteCustomKeyStoreInput, optFns ...func(*Options)) (*DeleteCustomKeyStoreOutput, error) {
if params == nil {
params = &DeleteCustomKeyStoreInput{}
@@ -101,25 +104,25 @@ func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.S
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -140,7 +143,7 @@ func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.S
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteCustomKeyStore(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go
index b8e39caf76f..26ac0a15152 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -26,6 +25,10 @@ import (
// (key policy) Related operations:
// - GetParametersForImport
// - ImportKeyMaterial
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) DeleteImportedKeyMaterial(ctx context.Context, params *DeleteImportedKeyMaterialInput, optFns ...func(*Options)) (*DeleteImportedKeyMaterialOutput, error) {
if params == nil {
params = &DeleteImportedKeyMaterialInput{}
@@ -86,25 +89,25 @@ func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middlew
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -125,7 +128,7 @@ func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middlew
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteImportedKeyMaterial(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go
index 0b93c3d30fb..3204174c2f0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -46,6 +45,10 @@ import (
// - DeleteCustomKeyStore
// - DisconnectCustomKeyStore
// - UpdateCustomKeyStore
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) DescribeCustomKeyStores(ctx context.Context, params *DescribeCustomKeyStoresInput, optFns ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) {
if params == nil {
params = &DescribeCustomKeyStoresInput{}
@@ -133,25 +136,25 @@ func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middlewar
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -169,7 +172,7 @@ func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middlewar
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCustomKeyStores(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go
index a0389d6e2a7..6070fb4343a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -20,12 +19,13 @@ import (
// of the key material. It includes fields, like KeySpec , that help you
// distinguish different types of KMS keys. It also displays the key usage
// (encryption, signing, or generating and verifying MACs) and the algorithms that
-// the KMS key supports. For multi-Region keys , DescribeKey displays the primary
-// key and all related replica keys. For KMS keys in CloudHSM key stores , it
-// includes information about the key store, such as the key store ID and the
-// CloudHSM cluster ID. For KMS keys in external key stores , it includes the
-// custom key store ID and the ID of the external key. DescribeKey does not return
-// the following information:
+// the KMS key supports. For multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+// , DescribeKey displays the primary key and all related replica keys. For KMS
+// keys in CloudHSM key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html)
+// , it includes information about the key store, such as the key store ID and the
+// CloudHSM cluster ID. For KMS keys in external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html)
+// , it includes the custom key store ID and the ID of the external key.
+// DescribeKey does not return the following information:
// - Aliases associated with the KMS key. To get this information, use
// ListAliases .
// - Whether automatic key rotation is enabled on the KMS key. To get this
@@ -52,6 +52,10 @@ import (
// - ListKeys
// - ListResourceTags
// - ListRetirableGrants
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) DescribeKey(ctx context.Context, params *DescribeKeyInput, optFns ...func(*Options)) (*DescribeKeyOutput, error) {
if params == nil {
params = &DescribeKeyInput{}
@@ -130,25 +134,25 @@ func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -169,7 +173,7 @@ func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeKey(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
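The reworked DescribeKey doc comment links to the multi-Region key and custom key store guides; a short sketch of reading the corresponding KeyMetadata fields from a DescribeKey response (package and helper names are illustrative):

package kmsexample

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// inspectKey prints the KeyMetadata fields that the DescribeKey documentation
// calls out for multi-Region keys and keys in custom key stores.
func inspectKey(ctx context.Context, client *kms.Client, keyID string) error {
	out, err := client.DescribeKey(ctx, &kms.DescribeKeyInput{KeyId: aws.String(keyID)})
	if err != nil {
		return err
	}
	md := out.KeyMetadata
	fmt.Printf("spec=%s usage=%s state=%s\n", md.KeySpec, md.KeyUsage, md.KeyState)
	if md.MultiRegion != nil && *md.MultiRegion {
		// Primary/replica details are in md.MultiRegionConfiguration.
		fmt.Println("multi-Region key")
	}
	if md.CustomKeyStoreId != nil {
		fmt.Printf("custom key store %s, CloudHSM cluster %q\n",
			aws.ToString(md.CustomKeyStoreId), aws.ToString(md.CloudHsmClusterId))
	}
	return nil
}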
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go
index 5da8b601df6..eeb57e55428 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -21,7 +20,10 @@ import (
// in the Key Management Service Developer Guide. Cross-account use: No. You cannot
// perform this operation on a KMS key in a different Amazon Web Services account.
// Required permissions: kms:DisableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: EnableKey
+// (key policy) Related operations: EnableKey Eventual consistency: The KMS API
+// follows an eventual consistency model. For more information, see KMS eventual
+// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) DisableKey(ctx context.Context, params *DisableKeyInput, optFns ...func(*Options)) (*DisableKeyOutput, error) {
if params == nil {
params = &DisableKeyInput{}
@@ -81,25 +83,25 @@ func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, opti
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -120,7 +122,7 @@ func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, opti
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKey(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go
index 171f8d1736b..6737c36a7af 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -34,6 +33,10 @@ import (
// (key policy) Related operations:
// - EnableKeyRotation
// - GetKeyRotationStatus
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) DisableKeyRotation(ctx context.Context, params *DisableKeyRotationInput, optFns ...func(*Options)) (*DisableKeyRotationOutput, error) {
if params == nil {
params = &DisableKeyRotationInput{}
@@ -97,25 +100,25 @@ func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Sta
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -136,7 +139,7 @@ func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Sta
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKeyRotation(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go
index 507bf1721d5..f9b7daae145 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -37,6 +36,10 @@ import (
// - DeleteCustomKeyStore
// - DescribeCustomKeyStores
// - UpdateCustomKeyStore
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) DisconnectCustomKeyStore(ctx context.Context, params *DisconnectCustomKeyStoreInput, optFns ...func(*Options)) (*DisconnectCustomKeyStoreOutput, error) {
if params == nil {
params = &DisconnectCustomKeyStoreInput{}
@@ -92,25 +95,25 @@ func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middlewa
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -131,7 +134,7 @@ func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middlewa
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisconnectCustomKeyStore(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go
index cb37ef9c68e..1395c9df67b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -18,7 +17,10 @@ import (
// in the Key Management Service Developer Guide. Cross-account use: No. You cannot
// perform this operation on a KMS key in a different Amazon Web Services account.
// Required permissions: kms:EnableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: DisableKey
+// (key policy) Related operations: DisableKey Eventual consistency: The KMS API
+// follows an eventual consistency model. For more information, see KMS eventual
+// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) EnableKey(ctx context.Context, params *EnableKeyInput, optFns ...func(*Options)) (*EnableKeyOutput, error) {
if params == nil {
params = &EnableKeyInput{}
@@ -78,25 +80,25 @@ func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, optio
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -117,7 +119,7 @@ func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, optio
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKey(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go
index b5953e228fc..06e237c808f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -43,6 +42,10 @@ import (
// (key policy) Related operations:
// - DisableKeyRotation
// - GetKeyRotationStatus
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) EnableKeyRotation(ctx context.Context, params *EnableKeyRotationInput, optFns ...func(*Options)) (*EnableKeyRotationOutput, error) {
if params == nil {
params = &EnableKeyRotationInput{}
@@ -108,25 +111,25 @@ func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stac
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -147,7 +150,7 @@ func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stac
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKeyRotation(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
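EnableKeyRotation's doc comment lists GetKeyRotationStatus as a related operation; a hedged sketch that enables rotation and reads the status back, noting the read may briefly lag the write under the eventual consistency model described above (package and helper names are illustrative):

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// enableAndCheckRotation turns on automatic rotation and then reads the
// rotation status back; the returned output reports whether rotation is enabled.
func enableAndCheckRotation(ctx context.Context, client *kms.Client, keyID string) (*kms.GetKeyRotationStatusOutput, error) {
	if _, err := client.EnableKeyRotation(ctx, &kms.EnableKeyRotationInput{KeyId: aws.String(keyID)}); err != nil {
		return nil, err
	}
	return client.GetKeyRotationStatus(ctx, &kms.GetKeyRotationStatusInput{KeyId: aws.String(keyID)})
}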
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go
index c7cb9ef0c36..d6628feb8ec 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -60,6 +59,10 @@ import (
// - Decrypt
// - GenerateDataKey
// - GenerateDataKeyPair
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) Encrypt(ctx context.Context, params *EncryptInput, optFns ...func(*Options)) (*EncryptOutput, error) {
if params == nil {
params = &EncryptInput{}
@@ -179,25 +182,25 @@ func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -218,7 +221,7 @@ func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEncrypt(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
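Encrypt's doc comment lists Decrypt and the GenerateDataKey operations as related APIs; a small round-trip sketch using the public client (key ID and helper name are illustrative):

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// encryptDecryptRoundTrip encrypts a small payload under a symmetric
// encryption KMS key and immediately decrypts it again.
func encryptDecryptRoundTrip(ctx context.Context, client *kms.Client, keyID string, plaintext []byte) ([]byte, error) {
	enc, err := client.Encrypt(ctx, &kms.EncryptInput{
		KeyId:     aws.String(keyID),
		Plaintext: plaintext,
	})
	if err != nil {
		return nil, err
	}
	// For symmetric encryption keys the KeyId on Decrypt is optional, but
	// passing it pins the request to the expected key.
	dec, err := client.Decrypt(ctx, &kms.DecryptInput{
		KeyId:          aws.String(keyID),
		CiphertextBlob: enc.CiphertextBlob,
	})
	if err != nil {
		return nil, err
	}
	return dec.Plaintext, nil
}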
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go
index ba0b66f1826..4a46272c303 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -82,6 +81,10 @@ import (
// - GenerateDataKeyPair
// - GenerateDataKeyPairWithoutPlaintext
// - GenerateDataKeyWithoutPlaintext
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GenerateDataKey(ctx context.Context, params *GenerateDataKeyInput, optFns ...func(*Options)) (*GenerateDataKeyOutput, error) {
if params == nil {
params = &GenerateDataKeyInput{}
@@ -230,25 +233,25 @@ func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack,
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -269,7 +272,7 @@ func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack,
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKey(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
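GenerateDataKey is the building block for envelope encryption: the plaintext data key encrypts data locally and the encrypted copy is stored alongside the ciphertext. A sketch of that pattern, assuming AES-256-GCM for the local encryption (package and helper names are illustrative):

package kmsexample

import (
	"context"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// sealWithDataKey requests a fresh AES-256 data key, encrypts msg locally with
// AES-GCM, and returns the ciphertext plus the encrypted data key to store
// alongside it. Call Decrypt on the encrypted data key when reading the data back.
func sealWithDataKey(ctx context.Context, client *kms.Client, keyID string, msg []byte) (ciphertext, encryptedDataKey []byte, err error) {
	out, err := client.GenerateDataKey(ctx, &kms.GenerateDataKeyInput{
		KeyId:   aws.String(keyID),
		KeySpec: types.DataKeySpecAes256,
	})
	if err != nil {
		return nil, nil, err
	}
	block, err := aes.NewCipher(out.Plaintext)
	if err != nil {
		return nil, nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, nil, err
	}
	// The nonce is prepended to the sealed message; discard out.Plaintext as soon as possible.
	return gcm.Seal(nonce, nonce, msg, nil), out.CiphertextBlob, nil
}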
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go
index 8b7e3f0259d..2156fb5f613 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -17,7 +16,7 @@ import (
// the private key that is encrypted under the symmetric encryption KMS key you
// specify. You can use the data key pair to perform asymmetric cryptography and
// implement digital signatures outside of KMS. The bytes in the keys are random;
-// they not related to the caller or to the KMS key that is used to encrypt the
+// they are not related to the caller or to the KMS key that is used to encrypt the
// private key. You can use the public key that GenerateDataKeyPair returns to
// encrypt data or verify a signature outside of KMS. Then, store the encrypted
// private key with the data. When you are ready to decrypt data or sign a message,
@@ -75,6 +74,10 @@ import (
// - GenerateDataKey
// - GenerateDataKeyPairWithoutPlaintext
// - GenerateDataKeyWithoutPlaintext
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GenerateDataKeyPair(ctx context.Context, params *GenerateDataKeyPairInput, optFns ...func(*Options)) (*GenerateDataKeyPairOutput, error) {
if params == nil {
params = &GenerateDataKeyPairInput{}
@@ -228,25 +231,25 @@ func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.St
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -267,7 +270,7 @@ func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.St
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPair(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
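The GenerateDataKeyPair doc comment describes using the pair for asymmetric cryptography and digital signatures outside of KMS. A sketch that requests an ECC_NIST_P256 pair and signs a precomputed digest locally, assuming the plaintext private key is PKCS #8 DER as the API documents (package and helper names are illustrative):

package kmsexample

import (
	"context"
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/x509"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// signOutsideKMS requests an ECC_NIST_P256 data key pair and signs a
// precomputed digest locally with the plaintext private key. The encrypted
// private key is returned so it can be stored and later recovered with Decrypt.
func signOutsideKMS(ctx context.Context, client *kms.Client, keyID string, digest []byte) (signature, encryptedPrivateKey []byte, err error) {
	out, err := client.GenerateDataKeyPair(ctx, &kms.GenerateDataKeyPairInput{
		KeyId:       aws.String(keyID),
		KeyPairSpec: types.DataKeyPairSpecEccNistP256,
	})
	if err != nil {
		return nil, nil, err
	}
	// The plaintext private key is DER-encoded PKCS #8.
	parsed, err := x509.ParsePKCS8PrivateKey(out.PrivateKeyPlaintext)
	if err != nil {
		return nil, nil, err
	}
	ecKey, ok := parsed.(*ecdsa.PrivateKey)
	if !ok {
		return nil, nil, fmt.Errorf("unexpected private key type %T", parsed)
	}
	sig, err := ecdsa.SignASN1(rand.Reader, ecKey, digest)
	if err != nil {
		return nil, nil, err
	}
	return sig, out.PrivateKeyCiphertextBlob, nil
}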
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go
index a6e58e4f8c3..94393a5168b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -52,6 +51,10 @@ import (
// - GenerateDataKey
// - GenerateDataKeyPair
// - GenerateDataKeyWithoutPlaintext
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GenerateDataKeyPairWithoutPlaintext(ctx context.Context, params *GenerateDataKeyPairWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyPairWithoutPlaintextOutput, error) {
if params == nil {
params = &GenerateDataKeyPairWithoutPlaintextInput{}
@@ -170,25 +173,25 @@ func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stac
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -209,7 +212,7 @@ func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stac
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPairWithoutPlaintext(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go
index 0113a566dc7..0a5af3cd567 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -57,6 +56,10 @@ import (
// - GenerateDataKey
// - GenerateDataKeyPair
// - GenerateDataKeyPairWithoutPlaintext
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GenerateDataKeyWithoutPlaintext(ctx context.Context, params *GenerateDataKeyWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyWithoutPlaintextOutput, error) {
if params == nil {
params = &GenerateDataKeyWithoutPlaintextInput{}
@@ -167,25 +170,25 @@ func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *m
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -206,7 +209,7 @@ func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *m
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyWithoutPlaintext(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go
index ad7b6e0b35d..5a2819bdb39 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -33,7 +32,10 @@ import (
// perform this operation with a KMS key in a different Amazon Web Services
// account, specify the key ARN or alias ARN in the value of the KeyId parameter.
// Required permissions: kms:GenerateMac (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: VerifyMac
+// (key policy) Related operations: VerifyMac Eventual consistency: The KMS API
+// follows an eventual consistency model. For more information, see KMS eventual
+// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GenerateMac(ctx context.Context, params *GenerateMacInput, optFns ...func(*Options)) (*GenerateMacOutput, error) {
if params == nil {
params = &GenerateMacInput{}
@@ -131,25 +133,25 @@ func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -170,7 +172,7 @@ func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateMac(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
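GenerateMac's doc comment names VerifyMac as its counterpart; a sketch of computing and verifying an HMAC-SHA-256 tag, assuming the KMS key is an HMAC key (package, helper name, and algorithm choice are illustrative):

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// macRoundTrip computes an HMAC-SHA-256 tag over msg and verifies it; the
// returned VerifyMac output reports whether the tag matched.
func macRoundTrip(ctx context.Context, client *kms.Client, hmacKeyID string, msg []byte) (*kms.VerifyMacOutput, error) {
	gen, err := client.GenerateMac(ctx, &kms.GenerateMacInput{
		KeyId:        aws.String(hmacKeyID),
		Message:      msg,
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
	})
	if err != nil {
		return nil, err
	}
	return client.VerifyMac(ctx, &kms.VerifyMacInput{
		KeyId:        aws.String(hmacKeyID),
		Message:      msg,
		Mac:          gen.Mac,
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
	})
}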
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go
index 5fe3f3fb10d..b2a2569ee82 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -33,7 +32,9 @@ import (
// Cross-account use: Not applicable. GenerateRandom does not use any
// account-specific resources, such as KMS keys. Required permissions:
// kms:GenerateRandom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (IAM policy)
+// (IAM policy) Eventual consistency: The KMS API follows an eventual consistency
+// model. For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GenerateRandom(ctx context.Context, params *GenerateRandomInput, optFns ...func(*Options)) (*GenerateRandomOutput, error) {
if params == nil {
params = &GenerateRandomInput{}
@@ -126,25 +127,25 @@ func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack,
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -162,7 +163,7 @@ func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack,
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateRandom(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
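GenerateRandom returns cryptographically secure random bytes generated inside KMS; a minimal sketch (package and helper name are illustrative):

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// randomBytes asks KMS for n random bytes (at most 1024 per request).
func randomBytes(ctx context.Context, client *kms.Client, n int32) ([]byte, error) {
	out, err := client.GenerateRandom(ctx, &kms.GenerateRandomInput{NumberOfBytes: aws.Int32(n)})
	if err != nil {
		return nil, err
	}
	return out.Plaintext, nil
}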
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go
index c398993dc77..db42c646ac3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -14,7 +13,10 @@ import (
// Gets a key policy attached to the specified KMS key. Cross-account use: No. You
// cannot perform this operation on a KMS key in a different Amazon Web Services
// account. Required permissions: kms:GetKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: PutKeyPolicy
+// (key policy) Related operations: PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GetKeyPolicy(ctx context.Context, params *GetKeyPolicyInput, optFns ...func(*Options)) (*GetKeyPolicyOutput, error) {
if params == nil {
params = &GetKeyPolicyInput{}
@@ -84,25 +86,25 @@ func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, op
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -123,7 +125,7 @@ func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, op
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyPolicy(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go
index b60e1ae0060..2214d4b0120 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -52,6 +51,10 @@ import (
// (key policy) Related operations:
// - DisableKeyRotation
// - EnableKeyRotation
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GetKeyRotationStatus(ctx context.Context, params *GetKeyRotationStatusInput, optFns ...func(*Options)) (*GetKeyRotationStatusOutput, error) {
if params == nil {
params = &GetKeyRotationStatusInput{}
@@ -116,25 +119,25 @@ func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.S
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -155,7 +158,7 @@ func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.S
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyRotationStatus(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go
index 172892085d7..cf3220739fd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -24,10 +23,12 @@ import (
// EXTERNAL to create a KMS key with no key material. You can import key material
// for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key,
// or asymmetric signing KMS key. You can also import key material into a
-// multi-Region key of any supported type. However, you can't import key material
-// into a KMS key in a custom key store . You can also use GetParametersForImport
-// to get a public key and import token to reimport the original key material into
-// a KMS key whose key material expired or was deleted. GetParametersForImport
+// multi-Region key (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+// of any supported type. However, you can't import key material into a KMS key in
+// a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// . You can also use GetParametersForImport to get a public key and import token
+// to reimport the original key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material)
+// into a KMS key whose key material expired or was deleted. GetParametersForImport
// returns the items that you need to import your key material.
// - The public key (or "wrapping key") of an RSA key pair that KMS generates.
// You will use this public key to encrypt ("wrap") your key material while it's in
@@ -58,6 +59,10 @@ import (
// (key policy) Related operations:
// - ImportKeyMaterial
// - DeleteImportedKeyMaterial
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GetParametersForImport(ctx context.Context, params *GetParametersForImportInput, optFns ...func(*Options)) (*GetParametersForImportOutput, error) {
if params == nil {
params = &GetParametersForImportInput{}
@@ -106,8 +111,8 @@ type GetParametersForImportInput struct {
// - RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key
// material (private key). You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm
// with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.
- // - RSAES_PKCS1_V1_5 (Deprecated) — Supported only for symmetric encryption key
- // material (and only in legacy mode).
+ // - RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not
+ // support the RSAES_PKCS1_V1_5 wrapping algorithm.
//
// This member is required.
WrappingAlgorithm types.AlgorithmSpec
@@ -171,25 +176,25 @@ func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -210,7 +215,7 @@ func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetParametersForImport(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
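As a point of reference for consumers of this bump, here is a minimal, illustrative sketch (not part of the vendored diff) of requesting import parameters with the updated kms client; the key ID, wrapping algorithm, and wrapping key spec are placeholder assumptions:

```go
package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// getImportParameters builds a KMS client from the default credential chain and
// fetches the RSA wrapping public key and import token for a key created with
// Origin = EXTERNAL. The key ID below is a placeholder.
func getImportParameters(ctx context.Context) (*kms.GetParametersForImportOutput, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	client := kms.NewFromConfig(cfg)
	return client.GetParametersForImport(ctx, &kms.GetParametersForImportInput{
		KeyId:             aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		WrappingAlgorithm: types.AlgorithmSpecRsaesOaepSha256,
		WrappingKeySpec:   types.WrappingKeySpecRsa2048,
	})
}
```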
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go
index 10091e2ddf2..9c52330f83c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -51,7 +50,10 @@ import (
// perform this operation with a KMS key in a different Amazon Web Services
// account, specify the key ARN or alias ARN in the value of the KeyId parameter.
// Required permissions: kms:GetPublicKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: CreateKey
+// (key policy) Related operations: CreateKey Eventual consistency: The KMS API
+// follows an eventual consistency model. For more information, see KMS eventual
+// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) GetPublicKey(ctx context.Context, params *GetPublicKeyInput, optFns ...func(*Options)) (*GetPublicKeyOutput, error) {
if params == nil {
params = &GetPublicKeyInput{}
@@ -160,25 +162,25 @@ func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, op
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -199,7 +201,7 @@ func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, op
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicKey(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
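A small illustrative sketch (not part of the diff) of fetching and parsing a public key with the updated client; the alias is a placeholder:

```go
package kmsexamples

import (
	"context"
	"crypto/x509"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// downloadPublicKey retrieves the public half of an asymmetric KMS key and
// parses the DER-encoded SubjectPublicKeyInfo that KMS returns.
func downloadPublicKey(ctx context.Context, client *kms.Client) (any, error) {
	out, err := client.GetPublicKey(ctx, &kms.GetPublicKeyInput{
		KeyId: aws.String("alias/example-signing-key"), // placeholder alias
	})
	if err != nil {
		return nil, err
	}
	return x509.ParsePKIXPublicKey(out.PublicKey)
}
```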
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go
index 7172c3c7e72..1a599c3d14b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -86,6 +85,10 @@ import (
// (key policy) Related operations:
// - DeleteImportedKeyMaterial
// - GetParametersForImport
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ImportKeyMaterial(ctx context.Context, params *ImportKeyMaterialInput, optFns ...func(*Options)) (*ImportKeyMaterialOutput, error) {
if params == nil {
params = &ImportKeyMaterialInput{}
@@ -189,25 +192,25 @@ func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stac
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -228,7 +231,7 @@ func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stac
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportKeyMaterial(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
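Continuing the import flow sketched earlier, key material wrapped with the downloaded public key might be submitted like this (illustrative only; the non-expiring expiration model is an assumption):

```go
package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// importKeyMaterial submits key material that was wrapped with the public key
// returned by GetParametersForImport. The import token must come from the same
// GetParametersForImport response; here the material is set to never expire.
func importKeyMaterial(ctx context.Context, client *kms.Client, keyID string, importToken, wrapped []byte) error {
	_, err := client.ImportKeyMaterial(ctx, &kms.ImportKeyMaterialInput{
		KeyId:                aws.String(keyID),
		ImportToken:          importToken,
		EncryptedKeyMaterial: wrapped,
		ExpirationModel:      types.ExpirationModelTypeKeyMaterialDoesNotExpire,
	})
	return err
}
```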
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go
index 944e9bb93ba..41cfda0d4fc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -32,6 +31,10 @@ import (
// - CreateAlias
// - DeleteAlias
// - UpdateAlias
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ListAliases(ctx context.Context, params *ListAliasesInput, optFns ...func(*Options)) (*ListAliasesOutput, error) {
if params == nil {
params = &ListAliasesInput{}
@@ -117,25 +120,25 @@ func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -153,7 +156,7 @@ func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAliases(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
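The List* operations touched by this diff are paginated; a hedged sketch of walking ListAliases with the generated paginator follows (the same pattern applies to ListKeys, ListGrants, and ListResourceTags):

```go
package kmsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// printAliases walks every page of ListAliases results using the generated
// paginator and prints each alias with the key it targets.
func printAliases(ctx context.Context, client *kms.Client) error {
	p := kms.NewListAliasesPaginator(client, &kms.ListAliasesInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, a := range page.Aliases {
			fmt.Printf("%s -> %s\n", aws.ToString(a.AliasName), aws.ToString(a.TargetKeyId))
		}
	}
	return nil
}
```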
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go
index a7e07190526..ab4acaffabd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -31,6 +30,10 @@ import (
// - ListRetirableGrants
// - RetireGrant
// - RevokeGrant
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ListGrants(ctx context.Context, params *ListGrantsInput, optFns ...func(*Options)) (*ListGrantsOutput, error) {
if params == nil {
params = &ListGrantsInput{}
@@ -125,25 +128,25 @@ func (c *Client) addOperationListGrantsMiddlewares(stack *middleware.Stack, opti
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -164,7 +167,7 @@ func (c *Client) addOperationListGrantsMiddlewares(stack *middleware.Stack, opti
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGrants(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go
index e207b236d07..604fbdfa58b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -18,7 +17,11 @@ import (
// Services account. Required permissions: kms:ListKeyPolicies (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
// (key policy) Related operations:
// - GetKeyPolicy
-// - PutKeyPolicy
+// - PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ListKeyPolicies(ctx context.Context, params *ListKeyPoliciesInput, optFns ...func(*Options)) (*ListKeyPoliciesOutput, error) {
if params == nil {
params = &ListKeyPoliciesInput{}
@@ -104,25 +107,25 @@ func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack,
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -143,7 +146,7 @@ func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack,
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeyPolicies(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go
index b7328b9a806..2a9165710b0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -20,6 +19,10 @@ import (
// - DescribeKey
// - ListAliases
// - ListResourceTags
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ListKeys(ctx context.Context, params *ListKeysInput, optFns ...func(*Options)) (*ListKeysOutput, error) {
if params == nil {
params = &ListKeysInput{}
@@ -95,25 +98,25 @@ func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, option
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -131,7 +134,7 @@ func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, option
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeys(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go
index b99781652e5..f04cc827c52 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -24,6 +23,10 @@ import (
// - ReplicateKey
// - TagResource
// - UntagResource
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ListResourceTags(ctx context.Context, params *ListResourceTagsInput, optFns ...func(*Options)) (*ListResourceTagsOutput, error) {
if params == nil {
params = &ListResourceTagsInput{}
@@ -114,25 +117,25 @@ func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -153,7 +156,7 @@ func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListResourceTags(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go
index 193892a9ae5..68d96486ecd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -22,15 +21,27 @@ import (
// in the Key Management Service Developer Guide . For examples of working with
// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html)
// . Cross-account use: You must specify a principal in your Amazon Web Services
-// account. However, this operation can return grants in any Amazon Web Services
-// account. You do not need kms:ListRetirableGrants permission (or any other
-// additional permission) in any Amazon Web Services account other than your own.
-// Required permissions: kms:ListRetirableGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (IAM policy) in your Amazon Web Services account. Related operations:
+// account. This operation returns a list of grants where the retiring principal
+// specified in the ListRetirableGrants request is the same retiring principal on
+// the grant. This can include grants on KMS keys owned by other Amazon Web
+// Services accounts, but you do not need kms:ListRetirableGrants permission (or
+// any other additional permission) in any Amazon Web Services account other than
+// your own. Required permissions: kms:ListRetirableGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy) in your Amazon Web Services account. KMS authorizes
+// ListRetirableGrants requests by evaluating the caller account's
+// kms:ListRetirableGrants permissions. The authorized resource in
+// ListRetirableGrants calls is the retiring principal specified in the request.
+// KMS does not evaluate the caller's permissions to verify their access to any KMS
+// keys or grants that might be returned by the ListRetirableGrants call. Related
+// operations:
// - CreateGrant
// - ListGrants
// - RetireGrant
// - RevokeGrant
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ListRetirableGrants(ctx context.Context, params *ListRetirableGrantsInput, optFns ...func(*Options)) (*ListRetirableGrantsOutput, error) {
if params == nil {
params = &ListRetirableGrantsInput{}
@@ -117,25 +128,25 @@ func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.St
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -156,7 +167,7 @@ func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.St
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListRetirableGrants(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
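To illustrate the clarified authorization model described above, a sketch of listing grants by retiring principal (the principal ARN is supplied by the caller; the output field names are assumed from the generated types):

```go
package kmsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// listRetirableGrants lists grants whose retiring principal matches the given
// principal ARN; this may include grants on keys owned by other accounts.
func listRetirableGrants(ctx context.Context, client *kms.Client, principalARN string) error {
	out, err := client.ListRetirableGrants(ctx, &kms.ListRetirableGrantsInput{
		RetiringPrincipal: aws.String(principalARN),
	})
	if err != nil {
		return err
	}
	for _, g := range out.Grants {
		fmt.Printf("grant %s on key %s\n", aws.ToString(g.GrantId), aws.ToString(g.KeyId))
	}
	return nil
}
```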
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go
index 2cc0f596c26..d03f494969f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -20,7 +19,10 @@ import (
// in the Key Management Service Developer Guide. Cross-account use: No. You cannot
// perform this operation on a KMS key in a different Amazon Web Services account.
// Required permissions: kms:PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: GetKeyPolicy
+// (key policy) Related operations: GetKeyPolicy Eventual consistency: The KMS API
+// follows an eventual consistency model. For more information, see KMS eventual
+// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) PutKeyPolicy(ctx context.Context, params *PutKeyPolicyInput, optFns ...func(*Options)) (*PutKeyPolicyOutput, error) {
if params == nil {
params = &PutKeyPolicyInput{}
@@ -88,7 +90,8 @@ type PutKeyPolicyInput struct {
// information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
// in the Key Management Service Developer Guide. Use this parameter only when you
// intend to prevent the principal that is making the request from making a
- // subsequent PutKeyPolicy request on the KMS key.
+ // subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+ // request on the KMS key.
BypassPolicyLockoutSafetyCheck bool
noSmithyDocumentSerde
@@ -123,25 +126,25 @@ func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, op
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -162,7 +165,7 @@ func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, op
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutKeyPolicy(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
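A hedged sketch of replacing a key policy while keeping the lockout safety check enabled; the policy JSON is supplied by the caller:

```go
package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// putKeyPolicy replaces the "default" key policy document on a KMS key.
// BypassPolicyLockoutSafetyCheck is left at its false default so KMS still
// verifies that the caller cannot lock itself out of the key.
func putKeyPolicy(ctx context.Context, client *kms.Client, keyID, policyJSON string) error {
	_, err := client.PutKeyPolicy(ctx, &kms.PutKeyPolicyInput{
		KeyId:      aws.String(keyID),
		PolicyName: aws.String("default"),
		Policy:     aws.String(policyJSON),
	})
	return err
}
```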
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go
index 95b3c2b2656..cbcd4c4934e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -78,6 +77,10 @@ import (
// - Encrypt
// - GenerateDataKey
// - GenerateDataKeyPair
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ReEncrypt(ctx context.Context, params *ReEncryptInput, optFns ...func(*Options)) (*ReEncryptOutput, error) {
if params == nil {
params = &ReEncryptInput{}
@@ -243,25 +246,25 @@ func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, optio
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -282,7 +285,7 @@ func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, optio
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReEncrypt(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
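For orientation, a minimal sketch of server-side re-encryption to a new symmetric key; for asymmetric ciphertexts the SourceKeyId and algorithm fields would also need to be set:

```go
package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// reEncrypt moves a ciphertext from one symmetric KMS key to another without
// exposing the plaintext to the caller; KMS decrypts and re-encrypts server-side.
func reEncrypt(ctx context.Context, client *kms.Client, ciphertext []byte, destKeyID string) ([]byte, error) {
	out, err := client.ReEncrypt(ctx, &kms.ReEncryptInput{
		CiphertextBlob:   ciphertext,
		DestinationKeyId: aws.String(destKeyID),
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil
}
```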
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go
index 625b701df93..4eb5fc4cceb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -59,10 +58,10 @@ import (
// operation in the replica key's Region. If you replicate a multi-Region primary
// key with imported key material, the replica key is created with no key material.
// You must import the same key material that you imported into the primary key.
-// For details, see Importing key material into multi-Region keys in the Key
-// Management Service Developer Guide. To convert a replica key to a primary key,
-// use the UpdatePrimaryRegion operation. ReplicateKey uses different default
-// values for the KeyPolicy and Tags parameters than those used in the KMS
+// For details, see Importing key material into multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html)
+// in the Key Management Service Developer Guide. To convert a replica key to a
+// primary key, use the UpdatePrimaryRegion operation. ReplicateKey uses different
+// default values for the KeyPolicy and Tags parameters than those used in the KMS
// console. For details, see the parameter descriptions. Cross-account use: No. You
// cannot use this operation to create a replica key in a different Amazon Web
// Services account. Required permissions:
@@ -75,6 +74,10 @@ import (
// Related operations
// - CreateKey
// - UpdatePrimaryRegion
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ReplicateKey(ctx context.Context, params *ReplicateKeyInput, optFns ...func(*Options)) (*ReplicateKeyOutput, error) {
if params == nil {
params = &ReplicateKeyInput{}
@@ -132,7 +135,8 @@ type ReplicateKeyInput struct {
// information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
// in the Key Management Service Developer Guide. Use this parameter only when you
// intend to prevent the principal that is making the request from making a
- // subsequent PutKeyPolicy request on the KMS key.
+ // subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+ // request on the KMS key.
BypassPolicyLockoutSafetyCheck bool
// A description of the KMS key. The default value is an empty string (no
@@ -244,25 +248,25 @@ func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, op
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -283,7 +287,7 @@ func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, op
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReplicateKey(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
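A sketch of creating a Region replica of a multi-Region primary key; the key ARN and target Region are placeholders:

```go
package kmsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// replicateKey creates a replica of a multi-Region primary key in another Region.
func replicateKey(ctx context.Context, client *kms.Client) error {
	out, err := client.ReplicateKey(ctx, &kms.ReplicateKeyInput{
		KeyId:         aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		ReplicaRegion: aws.String("us-west-2"),
	})
	if err != nil {
		return err
	}
	fmt.Println("replica ARN:", aws.ToString(out.ReplicaKeyMetadata.Arn))
	return nil
}
```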
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go
index 1b82be01d01..e52867b2461 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -24,7 +23,7 @@ import (
// in the Key Management Service Developer Guide . For examples of working with
// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html)
// . Cross-account use: Yes. You can retire a grant on a KMS key in a different
-// Amazon Web Services account. Required permissions::Permission to retire a grant
+// Amazon Web Services account. Required permissions: Permission to retire a grant
// is determined primarily by the grant. For details, see Retiring and revoking
// grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete)
// in the Key Management Service Developer Guide. Related operations:
@@ -32,6 +31,10 @@ import (
// - ListGrants
// - ListRetirableGrants
// - RevokeGrant
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) RetireGrant(ctx context.Context, params *RetireGrantInput, optFns ...func(*Options)) (*RetireGrantOutput, error) {
if params == nil {
params = &RetireGrantInput{}
@@ -104,25 +107,25 @@ func (c *Client) addOperationRetireGrantMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -140,7 +143,7 @@ func (c *Client) addOperationRetireGrantMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRetireGrant(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
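An illustrative sketch of retiring a grant by key ARN and grant ID; a grant token from CreateGrant could be passed via the GrantToken field instead:

```go
package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// retireGrant retires a grant identified by the key ARN and grant ID.
func retireGrant(ctx context.Context, client *kms.Client, keyARN, grantID string) error {
	_, err := client.RetireGrant(ctx, &kms.RetireGrantInput{
		KeyId:   aws.String(keyARN),
		GrantId: aws.String(grantID),
	})
	return err
}
```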
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go
index 8c7bfbadc6c..2c3f013aef5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -29,6 +28,10 @@ import (
// - ListGrants
// - ListRetirableGrants
// - RetireGrant
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) RevokeGrant(ctx context.Context, params *RevokeGrantInput, optFns ...func(*Options)) (*RevokeGrantOutput, error) {
if params == nil {
params = &RevokeGrantInput{}
@@ -101,25 +104,25 @@ func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -140,7 +143,7 @@ func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRevokeGrant(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go
index 2f60f67c470..a8e3354fbc8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -23,17 +22,17 @@ import (
// associated with it, including all aliases that refer to it. Deleting a KMS key
// is a destructive and potentially dangerous operation. When a KMS key is deleted,
// all data that was encrypted under the KMS key is unrecoverable. (The only
-// exception is a multi-Region replica key , or an asymmetric or HMAC KMS key with
-// imported key material .) To prevent the use of a KMS key without deleting it,
-// use DisableKey . You can schedule the deletion of a multi-Region primary key and
-// its replica keys at any time. However, KMS will not delete a multi-Region
-// primary key with existing replica keys. If you schedule the deletion of a
-// primary key with replicas, its key state changes to PendingReplicaDeletion and
-// it cannot be replicated or used in cryptographic operations. This status can
-// continue indefinitely. When the last of its replicas keys is deleted (not just
-// scheduled), the key state of the primary key changes to PendingDeletion and its
-// waiting period ( PendingWindowInDays ) begins. For details, see Deleting
-// multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
+// exception is a multi-Region replica key (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
+// , or an asymmetric or HMAC KMS key with imported key material .) To prevent the
+// use of a KMS key without deleting it, use DisableKey . You can schedule the
+// deletion of a multi-Region primary key and its replica keys at any time.
+// However, KMS will not delete a multi-Region primary key with existing replica
+// keys. If you schedule the deletion of a primary key with replicas, its key state
+// changes to PendingReplicaDeletion and it cannot be replicated or used in
+// cryptographic operations. This status can continue indefinitely. When the last
+// of its replicas keys is deleted (not just scheduled), the key state of the
+// primary key changes to PendingDeletion and its waiting period (
+// PendingWindowInDays ) begins. For details, see Deleting multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
// in the Key Management Service Developer Guide. When KMS deletes a KMS key from
// an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/delete-cmk-keystore.html)
// , it makes a best effort to delete the associated key material from the
@@ -54,6 +53,10 @@ import (
// Required permissions: kms:ScheduleKeyDeletion (key policy) Related operations
// - CancelKeyDeletion
// - DisableKey
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) ScheduleKeyDeletion(ctx context.Context, params *ScheduleKeyDeletionInput, optFns ...func(*Options)) (*ScheduleKeyDeletionOutput, error) {
if params == nil {
params = &ScheduleKeyDeletionInput{}
@@ -146,25 +149,25 @@ func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.St
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -185,7 +188,7 @@ func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.St
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opScheduleKeyDeletion(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
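A sketch of scheduling key deletion with the shortest allowed waiting period; omitting PendingWindowInDays falls back to the 30-day default:

```go
package kmsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// scheduleKeyDeletion starts the deletion waiting period for a KMS key using
// the minimum 7-day window.
func scheduleKeyDeletion(ctx context.Context, client *kms.Client, keyID string) error {
	out, err := client.ScheduleKeyDeletion(ctx, &kms.ScheduleKeyDeletionInput{
		KeyId:               aws.String(keyID),
		PendingWindowInDays: aws.Int32(7),
	})
	if err != nil {
		return err
	}
	fmt.Println("deletion date:", out.DeletionDate)
	return nil
}
```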
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go
index bcf3adc4eaf..c69fe69e42c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -50,7 +49,10 @@ import (
// perform this operation with a KMS key in a different Amazon Web Services
// account, specify the key ARN or alias ARN in the value of the KeyId parameter.
// Required permissions: kms:Sign (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: Verify
+// (key policy) Related operations: Verify Eventual consistency: The KMS API
+// follows an eventual consistency model. For more information, see KMS eventual
+// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) Sign(ctx context.Context, params *SignInput, optFns ...func(*Options)) (*SignOutput, error) {
if params == nil {
params = &SignInput{}
@@ -190,25 +192,25 @@ func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Op
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -229,7 +231,7 @@ func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Op
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSign(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
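A hedged sketch of signing a pre-computed digest with an asymmetric KMS signing key; the alias and ECDSA_SHA_256 algorithm are placeholders that must match the key's spec:

```go
package kmsexamples

import (
	"context"
	"crypto/sha256"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// signDigest hashes the payload locally and asks KMS to sign the digest,
// returning the raw signature bytes.
func signDigest(ctx context.Context, client *kms.Client, payload []byte) ([]byte, error) {
	digest := sha256.Sum256(payload)
	out, err := client.Sign(ctx, &kms.SignInput{
		KeyId:            aws.String("alias/example-signing-key"), // placeholder alias
		Message:          digest[:],
		MessageType:      types.MessageTypeDigest,
		SigningAlgorithm: types.SigningAlgorithmSpecEcdsaSha256,
	})
	if err != nil {
		return nil, err
	}
	return out.Signature, nil
}
```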
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go
index f4feb4702a7..001db07b674 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -40,6 +39,10 @@ import (
// - ListResourceTags
// - ReplicateKey
// - UntagResource
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) {
if params == nil {
params = &TagResourceInput{}
@@ -109,25 +112,25 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -148,7 +151,7 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
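A short sketch of tagging a customer managed key; the tag key and value come from the caller:

```go
package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// tagKey attaches (or overwrites) a single tag on a customer managed KMS key.
func tagKey(ctx context.Context, client *kms.Client, keyID, tagKey, tagValue string) error {
	_, err := client.TagResource(ctx, &kms.TagResourceInput{
		KeyId: aws.String(keyID),
		Tags: []types.Tag{
			{TagKey: aws.String(tagKey), TagValue: aws.String(tagValue)},
		},
	})
	return err
}
```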
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go
index 9e5bd75fb48..c3cae702817 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -33,6 +32,10 @@ import (
// - ListResourceTags
// - ReplicateKey
// - TagResource
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) {
if params == nil {
params = &UntagResourceInput{}
@@ -97,25 +100,25 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -136,7 +139,7 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
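
Each KMS operation's doc comment in this update gains the note that the KMS API follows an eventual consistency model. As a hedged, caller-side illustration only (not part of this diff): a read issued immediately after a write may briefly miss the new state, so callers sometimes wrap such reads in a short retry loop keyed on NotFoundException.

// Hypothetical sketch of retrying a read that can lag a just-completed write.
package kmsconsistency

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func describeWithRetry(ctx context.Context, client *kms.Client, keyID string) (*kms.DescribeKeyOutput, error) {
	for attempt := 1; attempt <= 5; attempt++ {
		out, err := client.DescribeKey(ctx, &kms.DescribeKeyInput{KeyId: &keyID})
		if err == nil {
			return out, nil
		}
		var nf *types.NotFoundException
		if !errors.As(err, &nf) {
			return nil, err // anything other than NotFound is a real failure
		}
		// Back off briefly; the key usually becomes visible within seconds.
		time.Sleep(time.Duration(attempt) * 500 * time.Millisecond)
	}
	return nil, fmt.Errorf("key %s not visible after retries", keyID)
}
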
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go
index 1cff25022fc..cb429fd7382 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -44,6 +43,10 @@ import (
// - CreateAlias
// - DeleteAlias
// - ListAliases
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) UpdateAlias(ctx context.Context, params *UpdateAliasInput, optFns ...func(*Options)) (*UpdateAliasOutput, error) {
if params == nil {
params = &UpdateAliasInput{}
@@ -118,25 +121,25 @@ func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -157,7 +160,7 @@ func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAlias(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go
index 047a795c8b3..14447d9d9b8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -70,6 +69,10 @@ import (
// - DeleteCustomKeyStore
// - DescribeCustomKeyStores
// - DisconnectCustomKeyStore
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) UpdateCustomKeyStore(ctx context.Context, params *UpdateCustomKeyStoreInput, optFns ...func(*Options)) (*UpdateCustomKeyStoreOutput, error) {
if params == nil {
params = &UpdateCustomKeyStoreInput{}
@@ -208,25 +211,25 @@ func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.S
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -247,7 +250,7 @@ func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.S
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateCustomKeyStore(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go
index dd72d4208d6..d3636033222 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -20,6 +19,10 @@ import (
// (key policy) Related operations
// - CreateKey
// - DescribeKey
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) UpdateKeyDescription(ctx context.Context, params *UpdateKeyDescriptionInput, optFns ...func(*Options)) (*UpdateKeyDescriptionOutput, error) {
if params == nil {
params = &UpdateKeyDescriptionInput{}
@@ -86,25 +89,25 @@ func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.S
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -125,7 +128,7 @@ func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.S
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateKeyDescription(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go
index af4ea44820d..ee5bbc6e8f7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -62,6 +61,10 @@ import (
// Related operations
// - CreateKey
// - ReplicateKey
+//
+// Eventual consistency: The KMS API follows an eventual consistency model. For
+// more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) UpdatePrimaryRegion(ctx context.Context, params *UpdatePrimaryRegionInput, optFns ...func(*Options)) (*UpdatePrimaryRegionOutput, error) {
if params == nil {
params = &UpdatePrimaryRegionInput{}
@@ -130,25 +133,25 @@ func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.St
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -169,7 +172,7 @@ func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.St
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdatePrimaryRegion(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go
index ffa81fdfef3..6b880e43af4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -43,7 +42,10 @@ import (
// perform this operation with a KMS key in a different Amazon Web Services
// account, specify the key ARN or alias ARN in the value of the KeyId parameter.
// Required permissions: kms:Verify (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: Sign
+// (key policy) Related operations: Sign Eventual consistency: The KMS API follows
+// an eventual consistency model. For more information, see KMS eventual
+// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) Verify(ctx context.Context, params *VerifyInput, optFns ...func(*Options)) (*VerifyOutput, error) {
if params == nil {
params = &VerifyInput{}
@@ -178,25 +180,25 @@ func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -217,7 +219,7 @@ func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerify(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go
index 609421dc862..cbffec19181 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/kms/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -30,7 +29,10 @@ import (
// this operation with a KMS key in a different Amazon Web Services account,
// specify the key ARN or alias ARN in the value of the KeyId parameter. Required
// permissions: kms:VerifyMac (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy) Related operations: GenerateMac
+// (key policy) Related operations: GenerateMac Eventual consistency: The KMS API
+// follows an eventual consistency model. For more information, see KMS eventual
+// consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+// .
func (c *Client) VerifyMac(ctx context.Context, params *VerifyMacInput, optFns ...func(*Options)) (*VerifyMacOutput, error) {
if params == nil {
params = &VerifyMacInput{}
@@ -135,25 +137,25 @@ func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, optio
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -174,7 +176,7 @@ func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, optio
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerifyMac(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go
index dafb70143a2..16619a43751 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go
@@ -52,6 +52,34 @@ func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error
return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
}
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
// AuthResolverParameters contains the set of inputs necessary for auth scheme
// resolution.
type AuthResolverParameters struct {
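
The withAnonymous wrapper added above appends the anonymous smithy auth scheme as a trailing option, and wrapWithAnonymousAuth only installs it when the client is still using the default resolver, so custom resolvers keep full control of scheme selection. A hedged sketch of how this is presumably wired in while client options are finalized (the actual call site is the package's client constructor, which is not part of this hunk; the function name below is hypothetical):

// Hypothetical wiring sketch for the options-finalization step.
func finalizeAuthOptions(opts *Options) {
	if opts.AuthSchemeResolver == nil {
		opts.AuthSchemeResolver = &defaultAuthSchemeResolver{}
	}
	// No-op for custom resolvers; otherwise the anonymous scheme becomes a
	// last-resort option after everything the default resolver returned.
	wrapWithAnonymousAuth(opts)
}
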
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go
index e573d93a909..573d41aeea4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go
@@ -89,9 +89,6 @@ func awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response *smithyhttp.Respo
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -99,7 +96,7 @@ func awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response *smithyhttp.Respo
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -111,13 +108,12 @@ func awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response *smithyhttp.Respo
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
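
The same deserializer rewrite repeats for every operation below: instead of sanitizing the X-Amzn-ErrorType header up front and then conditionally preferring the JSON body code, the generated code now reads the body once via getProtocolErrorInfo and lets resolveProtocolErrorType choose between the header and body values. Both helpers are defined further down in deserializers.go and are not shown in these hunks; a minimal hedged sketch of the precedence they presumably implement, with a stand-in struct since the diff only shows that the returned value carries at least a Message field:

// Hypothetical sketch only; field names other than Message are assumptions.
type protocolErrorInfo struct {
	Type, Code, Message string
}

func resolveProtocolErrorType(headerType string, body protocolErrorInfo) (string, bool) {
	if len(headerType) != 0 {
		return headerType, true // header error type still wins when present
	}
	if len(body.Code) != 0 {
		return body.Code, true // assumed body field
	}
	if len(body.Type) != 0 {
		return body.Type, true // assumed body field
	}
	return "", false
}
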
@@ -212,9 +208,6 @@ func awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response *smithyhttp.R
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -222,7 +215,7 @@ func awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response *smithyhttp.R
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -234,13 +227,12 @@ func awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response *smithyhttp.R
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode):
return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody)
@@ -313,9 +305,6 @@ func awsAwsjson11_deserializeOpErrorCreateAlias(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -323,7 +312,7 @@ func awsAwsjson11_deserializeOpErrorCreateAlias(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -335,13 +324,12 @@ func awsAwsjson11_deserializeOpErrorCreateAlias(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("AlreadyExistsException", errorCode):
return awsAwsjson11_deserializeErrorAlreadyExistsException(response, errorBody)
@@ -442,9 +430,6 @@ func awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response *smithyhttp.Re
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -452,7 +437,7 @@ func awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response *smithyhttp.Re
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -464,13 +449,12 @@ func awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response *smithyhttp.Re
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("CloudHsmClusterInUseException", errorCode):
return awsAwsjson11_deserializeErrorCloudHsmClusterInUseException(response, errorBody)
@@ -601,9 +585,6 @@ func awsAwsjson11_deserializeOpErrorCreateGrant(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -611,7 +592,7 @@ func awsAwsjson11_deserializeOpErrorCreateGrant(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -623,13 +604,12 @@ func awsAwsjson11_deserializeOpErrorCreateGrant(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -736,9 +716,6 @@ func awsAwsjson11_deserializeOpErrorCreateKey(response *smithyhttp.Response, met
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -746,7 +723,7 @@ func awsAwsjson11_deserializeOpErrorCreateKey(response *smithyhttp.Response, met
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -758,13 +735,12 @@ func awsAwsjson11_deserializeOpErrorCreateKey(response *smithyhttp.Response, met
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode):
return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody)
@@ -883,9 +859,6 @@ func awsAwsjson11_deserializeOpErrorDecrypt(response *smithyhttp.Response, metad
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -893,7 +866,7 @@ func awsAwsjson11_deserializeOpErrorDecrypt(response *smithyhttp.Response, metad
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -905,13 +878,12 @@ func awsAwsjson11_deserializeOpErrorDecrypt(response *smithyhttp.Response, metad
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -1002,9 +974,6 @@ func awsAwsjson11_deserializeOpErrorDeleteAlias(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -1012,7 +981,7 @@ func awsAwsjson11_deserializeOpErrorDeleteAlias(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -1024,13 +993,12 @@ func awsAwsjson11_deserializeOpErrorDeleteAlias(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -1122,9 +1090,6 @@ func awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response *smithyhttp.Re
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -1132,7 +1097,7 @@ func awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response *smithyhttp.Re
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -1144,13 +1109,12 @@ func awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response *smithyhttp.Re
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("CustomKeyStoreHasCMKsException", errorCode):
return awsAwsjson11_deserializeErrorCustomKeyStoreHasCMKsException(response, errorBody)
@@ -1220,9 +1184,6 @@ func awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response *smithyht
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -1230,7 +1191,7 @@ func awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response *smithyht
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -1242,13 +1203,12 @@ func awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response *smithyht
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -1346,9 +1306,6 @@ func awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response *smithyhttp
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -1356,7 +1313,7 @@ func awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response *smithyhttp
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -1368,13 +1325,12 @@ func awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response *smithyhttp
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode):
return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody)
@@ -1463,9 +1419,6 @@ func awsAwsjson11_deserializeOpErrorDescribeKey(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -1473,7 +1426,7 @@ func awsAwsjson11_deserializeOpErrorDescribeKey(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -1485,13 +1438,12 @@ func awsAwsjson11_deserializeOpErrorDescribeKey(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -1561,9 +1513,6 @@ func awsAwsjson11_deserializeOpErrorDisableKey(response *smithyhttp.Response, me
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -1571,7 +1520,7 @@ func awsAwsjson11_deserializeOpErrorDisableKey(response *smithyhttp.Response, me
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -1583,13 +1532,12 @@ func awsAwsjson11_deserializeOpErrorDisableKey(response *smithyhttp.Response, me
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -1662,9 +1610,6 @@ func awsAwsjson11_deserializeOpErrorDisableKeyRotation(response *smithyhttp.Resp
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -1672,7 +1617,7 @@ func awsAwsjson11_deserializeOpErrorDisableKeyRotation(response *smithyhttp.Resp
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -1684,13 +1629,12 @@ func awsAwsjson11_deserializeOpErrorDisableKeyRotation(response *smithyhttp.Resp
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -1791,9 +1735,6 @@ func awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response *smithyhtt
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -1801,7 +1742,7 @@ func awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response *smithyhtt
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -1813,13 +1754,12 @@ func awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response *smithyhtt
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode):
return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody)
@@ -1886,9 +1826,6 @@ func awsAwsjson11_deserializeOpErrorEnableKey(response *smithyhttp.Response, met
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -1896,7 +1833,7 @@ func awsAwsjson11_deserializeOpErrorEnableKey(response *smithyhttp.Response, met
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -1908,13 +1845,12 @@ func awsAwsjson11_deserializeOpErrorEnableKey(response *smithyhttp.Response, met
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -1990,9 +1926,6 @@ func awsAwsjson11_deserializeOpErrorEnableKeyRotation(response *smithyhttp.Respo
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -2000,7 +1933,7 @@ func awsAwsjson11_deserializeOpErrorEnableKeyRotation(response *smithyhttp.Respo
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -2012,13 +1945,12 @@ func awsAwsjson11_deserializeOpErrorEnableKeyRotation(response *smithyhttp.Respo
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -2119,9 +2051,6 @@ func awsAwsjson11_deserializeOpErrorEncrypt(response *smithyhttp.Response, metad
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -2129,7 +2058,7 @@ func awsAwsjson11_deserializeOpErrorEncrypt(response *smithyhttp.Response, metad
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -2141,13 +2070,12 @@ func awsAwsjson11_deserializeOpErrorEncrypt(response *smithyhttp.Response, metad
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -2254,9 +2182,6 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKey(response *smithyhttp.Respons
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -2264,7 +2189,7 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKey(response *smithyhttp.Respons
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -2276,13 +2201,12 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKey(response *smithyhttp.Respons
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -2389,9 +2313,6 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response *smithyhttp.Res
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -2399,7 +2320,7 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response *smithyhttp.Res
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -2411,13 +2332,12 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response *smithyhttp.Res
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -2527,9 +2447,6 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -2537,7 +2454,7 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -2549,13 +2466,12 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -2665,9 +2581,6 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response *sm
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -2675,7 +2588,7 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response *sm
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -2687,13 +2600,12 @@ func awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response *sm
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -2800,9 +2712,6 @@ func awsAwsjson11_deserializeOpErrorGenerateMac(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -2810,7 +2719,7 @@ func awsAwsjson11_deserializeOpErrorGenerateMac(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -2822,13 +2731,12 @@ func awsAwsjson11_deserializeOpErrorGenerateMac(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DisabledException", errorCode):
return awsAwsjson11_deserializeErrorDisabledException(response, errorBody)
@@ -2932,9 +2840,6 @@ func awsAwsjson11_deserializeOpErrorGenerateRandom(response *smithyhttp.Response
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -2942,7 +2847,7 @@ func awsAwsjson11_deserializeOpErrorGenerateRandom(response *smithyhttp.Response
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -2954,13 +2859,12 @@ func awsAwsjson11_deserializeOpErrorGenerateRandom(response *smithyhttp.Response
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode):
return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody)
@@ -3055,9 +2959,6 @@ func awsAwsjson11_deserializeOpErrorGetKeyPolicy(response *smithyhttp.Response,
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -3065,7 +2966,7 @@ func awsAwsjson11_deserializeOpErrorGetKeyPolicy(response *smithyhttp.Response,
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -3077,13 +2978,12 @@ func awsAwsjson11_deserializeOpErrorGetKeyPolicy(response *smithyhttp.Response,
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -3178,9 +3078,6 @@ func awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response *smithyhttp.Re
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -3188,7 +3085,7 @@ func awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response *smithyhttp.Re
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -3200,13 +3097,12 @@ func awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response *smithyhttp.Re
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -3304,9 +3200,6 @@ func awsAwsjson11_deserializeOpErrorGetParametersForImport(response *smithyhttp.
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -3314,7 +3207,7 @@ func awsAwsjson11_deserializeOpErrorGetParametersForImport(response *smithyhttp.
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -3326,13 +3219,12 @@ func awsAwsjson11_deserializeOpErrorGetParametersForImport(response *smithyhttp.
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -3430,9 +3322,6 @@ func awsAwsjson11_deserializeOpErrorGetPublicKey(response *smithyhttp.Response,
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -3440,7 +3329,7 @@ func awsAwsjson11_deserializeOpErrorGetPublicKey(response *smithyhttp.Response,
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -3452,13 +3341,12 @@ func awsAwsjson11_deserializeOpErrorGetPublicKey(response *smithyhttp.Response,
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -3568,9 +3456,6 @@ func awsAwsjson11_deserializeOpErrorImportKeyMaterial(response *smithyhttp.Respo
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -3578,7 +3463,7 @@ func awsAwsjson11_deserializeOpErrorImportKeyMaterial(response *smithyhttp.Respo
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -3590,13 +3475,12 @@ func awsAwsjson11_deserializeOpErrorImportKeyMaterial(response *smithyhttp.Respo
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -3706,9 +3590,6 @@ func awsAwsjson11_deserializeOpErrorListAliases(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -3716,7 +3597,7 @@ func awsAwsjson11_deserializeOpErrorListAliases(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -3728,13 +3609,12 @@ func awsAwsjson11_deserializeOpErrorListAliases(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -3829,9 +3709,6 @@ func awsAwsjson11_deserializeOpErrorListGrants(response *smithyhttp.Response, me
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -3839,7 +3716,7 @@ func awsAwsjson11_deserializeOpErrorListGrants(response *smithyhttp.Response, me
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -3851,13 +3728,12 @@ func awsAwsjson11_deserializeOpErrorListGrants(response *smithyhttp.Response, me
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -3958,9 +3834,6 @@ func awsAwsjson11_deserializeOpErrorListKeyPolicies(response *smithyhttp.Respons
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -3968,7 +3841,7 @@ func awsAwsjson11_deserializeOpErrorListKeyPolicies(response *smithyhttp.Respons
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -3980,13 +3853,12 @@ func awsAwsjson11_deserializeOpErrorListKeyPolicies(response *smithyhttp.Respons
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -4081,9 +3953,6 @@ func awsAwsjson11_deserializeOpErrorListKeys(response *smithyhttp.Response, meta
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -4091,7 +3960,7 @@ func awsAwsjson11_deserializeOpErrorListKeys(response *smithyhttp.Response, meta
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -4103,13 +3972,12 @@ func awsAwsjson11_deserializeOpErrorListKeys(response *smithyhttp.Response, meta
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -4198,9 +4066,6 @@ func awsAwsjson11_deserializeOpErrorListResourceTags(response *smithyhttp.Respon
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -4208,7 +4073,7 @@ func awsAwsjson11_deserializeOpErrorListResourceTags(response *smithyhttp.Respon
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -4220,13 +4085,12 @@ func awsAwsjson11_deserializeOpErrorListResourceTags(response *smithyhttp.Respon
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("InvalidArnException", errorCode):
return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
@@ -4318,9 +4182,6 @@ func awsAwsjson11_deserializeOpErrorListRetirableGrants(response *smithyhttp.Res
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -4328,7 +4189,7 @@ func awsAwsjson11_deserializeOpErrorListRetirableGrants(response *smithyhttp.Res
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -4340,13 +4201,12 @@ func awsAwsjson11_deserializeOpErrorListRetirableGrants(response *smithyhttp.Res
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -4419,9 +4279,6 @@ func awsAwsjson11_deserializeOpErrorPutKeyPolicy(response *smithyhttp.Response,
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -4429,7 +4286,7 @@ func awsAwsjson11_deserializeOpErrorPutKeyPolicy(response *smithyhttp.Response,
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -4441,13 +4298,12 @@ func awsAwsjson11_deserializeOpErrorPutKeyPolicy(response *smithyhttp.Response,
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -4551,9 +4407,6 @@ func awsAwsjson11_deserializeOpErrorReEncrypt(response *smithyhttp.Response, met
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -4561,7 +4414,7 @@ func awsAwsjson11_deserializeOpErrorReEncrypt(response *smithyhttp.Response, met
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -4573,13 +4426,12 @@ func awsAwsjson11_deserializeOpErrorReEncrypt(response *smithyhttp.Response, met
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -4692,9 +4544,6 @@ func awsAwsjson11_deserializeOpErrorReplicateKey(response *smithyhttp.Response,
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -4702,7 +4551,7 @@ func awsAwsjson11_deserializeOpErrorReplicateKey(response *smithyhttp.Response,
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -4714,13 +4563,12 @@ func awsAwsjson11_deserializeOpErrorReplicateKey(response *smithyhttp.Response,
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("AlreadyExistsException", errorCode):
return awsAwsjson11_deserializeErrorAlreadyExistsException(response, errorBody)
@@ -4808,9 +4656,6 @@ func awsAwsjson11_deserializeOpErrorRetireGrant(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -4818,7 +4663,7 @@ func awsAwsjson11_deserializeOpErrorRetireGrant(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -4830,13 +4675,12 @@ func awsAwsjson11_deserializeOpErrorRetireGrant(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -4918,9 +4762,6 @@ func awsAwsjson11_deserializeOpErrorRevokeGrant(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -4928,7 +4769,7 @@ func awsAwsjson11_deserializeOpErrorRevokeGrant(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -4940,13 +4781,12 @@ func awsAwsjson11_deserializeOpErrorRevokeGrant(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -5047,9 +4887,6 @@ func awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response *smithyhttp.Res
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -5057,7 +4894,7 @@ func awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response *smithyhttp.Res
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -5069,13 +4906,12 @@ func awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response *smithyhttp.Res
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -5170,9 +5006,6 @@ func awsAwsjson11_deserializeOpErrorSign(response *smithyhttp.Response, metadata
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -5180,7 +5013,7 @@ func awsAwsjson11_deserializeOpErrorSign(response *smithyhttp.Response, metadata
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -5192,13 +5025,12 @@ func awsAwsjson11_deserializeOpErrorSign(response *smithyhttp.Response, metadata
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -5283,9 +5115,6 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -5293,7 +5122,7 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -5305,13 +5134,12 @@ func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("InvalidArnException", errorCode):
return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
@@ -5387,9 +5215,6 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response,
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -5397,7 +5222,7 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response,
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -5409,13 +5234,12 @@ func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response,
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("InvalidArnException", errorCode):
return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
@@ -5488,9 +5312,6 @@ func awsAwsjson11_deserializeOpErrorUpdateAlias(response *smithyhttp.Response, m
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -5498,7 +5319,7 @@ func awsAwsjson11_deserializeOpErrorUpdateAlias(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -5510,13 +5331,12 @@ func awsAwsjson11_deserializeOpErrorUpdateAlias(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -5611,9 +5431,6 @@ func awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response *smithyhttp.Re
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -5621,7 +5438,7 @@ func awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response *smithyhttp.Re
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -5633,13 +5450,12 @@ func awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response *smithyhttp.Re
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode):
return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody)
@@ -5748,9 +5564,6 @@ func awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response *smithyhttp.Re
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -5758,7 +5571,7 @@ func awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response *smithyhttp.Re
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -5770,13 +5583,12 @@ func awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response *smithyhttp.Re
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -5849,9 +5661,6 @@ func awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response *smithyhttp.Res
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -5859,7 +5668,7 @@ func awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response *smithyhttp.Res
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -5871,13 +5680,12 @@ func awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response *smithyhttp.Res
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DisabledException", errorCode):
return awsAwsjson11_deserializeErrorDisabledException(response, errorBody)
@@ -5975,9 +5783,6 @@ func awsAwsjson11_deserializeOpErrorVerify(response *smithyhttp.Response, metada
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -5985,7 +5790,7 @@ func awsAwsjson11_deserializeOpErrorVerify(response *smithyhttp.Response, metada
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -5997,13 +5802,12 @@ func awsAwsjson11_deserializeOpErrorVerify(response *smithyhttp.Response, metada
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DependencyTimeoutException", errorCode):
return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
@@ -6113,9 +5917,6 @@ func awsAwsjson11_deserializeOpErrorVerifyMac(response *smithyhttp.Response, met
errorMessage := errorCode
headerCode := response.Header.Get("X-Amzn-ErrorType")
- if len(headerCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(headerCode)
- }
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
@@ -6123,7 +5924,7 @@ func awsAwsjson11_deserializeOpErrorVerifyMac(response *smithyhttp.Response, met
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ bodyInfo, err := getProtocolErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -6135,13 +5936,12 @@ func awsAwsjson11_deserializeOpErrorVerifyMac(response *smithyhttp.Response, met
}
errorBody.Seek(0, io.SeekStart)
- if len(headerCode) == 0 && len(jsonCode) != 0 {
- errorCode = restjson.SanitizeErrorCode(jsonCode)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
}
- if len(message) != 0 {
- errorMessage = message
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
}
-
switch {
case strings.EqualFold("DisabledException", errorCode):
return awsAwsjson11_deserializeErrorDisabledException(response, errorBody)
@@ -12982,3 +12782,32 @@ func awsAwsjson11_deserializeOpDocumentVerifyOutput(v **VerifyOutput, value inte
*v = sv
return nil
}
+
+type protocolErrorInfo struct {
+ Type string `json:"__type"`
+ Message string
+ Code any // nonstandard for awsjson but some services do present the type here
+}
+
+func getProtocolErrorInfo(decoder *json.Decoder) (protocolErrorInfo, error) {
+ var errInfo protocolErrorInfo
+ if err := decoder.Decode(&errInfo); err != nil {
+ if err == io.EOF {
+ return errInfo, nil
+ }
+ return errInfo, err
+ }
+
+ return errInfo, nil
+}
+
+func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) {
+ if len(headerType) != 0 {
+ return headerType, true
+ } else if len(bodyInfo.Type) != 0 {
+ return bodyInfo.Type, true
+ } else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 {
+ return code, true
+ }
+ return "", false
+}
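
For readers skimming the repeated hunks above: every generated `deserializeOpError*` function now resolves the error code through one precedence chain instead of two ad-hoc checks. The snippet below is illustrative only — it copies the two helpers added in this diff into a standalone program to make the order visible: the `X-Amzn-ErrorType` header first, then the body's `__type` field, then the nonstandard `code` field some services emit.

```go
package main

import "fmt"

// Standalone copy of the helpers added in this diff, kept only to show the
// precedence used by the generated deserializers.
type protocolErrorInfo struct {
	Type    string `json:"__type"`
	Message string
	Code    any
}

func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) {
	if len(headerType) != 0 {
		return headerType, true
	} else if len(bodyInfo.Type) != 0 {
		return bodyInfo.Type, true
	} else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 {
		return code, true
	}
	return "", false
}

func main() {
	// The X-Amzn-ErrorType header wins over anything in the body.
	fmt.Println(resolveProtocolErrorType("NotFoundException", protocolErrorInfo{Type: "ThrottlingException"}))
	// With no header, the body's __type is used, then "code" as a last resort.
	fmt.Println(resolveProtocolErrorType("", protocolErrorInfo{Code: "LimitExceededException"}))
	// Nothing usable: ok == false and the deserializer keeps its fallback errorCode.
	fmt.Println(resolveProtocolErrorType("", protocolErrorInfo{}))
}
```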
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go
index 766885cbb8c..5d09307d3b0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go
@@ -216,6 +216,13 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) {
}
}
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
// EndpointParameters provides the parameters that influence how endpoints are
// resolved.
type EndpointParameters struct {
@@ -366,7 +373,7 @@ func (r *resolver) ResolveEndpoint(
}
}
if _UseFIPS == true {
- if true == _PartitionResult.SupportsFIPS {
+ if _PartitionResult.SupportsFIPS == true {
uriString := func() string {
var out strings.Builder
out.WriteString("https://kms-fips.")
@@ -442,7 +449,7 @@ type endpointParamsBinder interface {
func bindEndpointParams(input interface{}, options Options) *EndpointParameters {
params := &EndpointParameters{}
- params.Region = aws.String(endpoints.MapFIPSRegion(options.Region))
+ params.Region = bindRegion(options.Region)
params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
params.Endpoint = options.BaseEndpoint
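
The `bindRegion` helper added to `endpoints.go` backs the behavior change described in the SSO changelog further down (v1.19.1): an unset region is now bound as `nil` rather than a pointer to `""`, so endpoint resolution fails with an explicit missing-region error instead of an obscure hostname lookup failure. A minimal standalone sketch of that distinction, with the FIPS mapping elided (the real helper also runs the region through `endpoints.MapFIPSRegion` before wrapping it with `aws.String`):

```go
package main

import "fmt"

// Illustrative stand-in for the generated bindRegion: an empty region becomes
// nil instead of a pointer to the empty string.
func bindRegion(region string) *string {
	if region == "" {
		return nil
	}
	r := region
	return &r
}

func main() {
	fmt.Println(bindRegion(""))           // <nil> -> resolver reports a missing region
	fmt.Println(*bindRegion("us-west-2")) // us-west-2
}
```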
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json
index 5fa6546a0e5..eb4b1181bb6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json
@@ -71,6 +71,7 @@
"options.go",
"protocol_test.go",
"serializers.go",
+ "snapshot_test.go",
"types/enums.go",
"types/errors.go",
"types/types.go",
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go
index b1e237e444a..a91c7c51fa1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go
@@ -3,4 +3,4 @@
package kms
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.27.2"
+const goModuleVersion = "1.29.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go
index fc0d645da91..bcd411119d5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go
@@ -364,6 +364,24 @@ var defaultPartitions = endpoints.Partitions{
},
Deprecated: aws.TrueTernary,
},
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "kms-fips.ca-west-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "kms-fips.ca-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
endpoints.EndpointKey{
Region: "eu-central-1",
}: endpoints.Endpoint{},
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go
index b4057f7eb59..b24e2565947 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go
@@ -70,11 +70,9 @@ type Options struct {
// RetryMaxAttempts specifies the maximum number attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. If specified in an operation call's functional
- // options with a value that is different than the constructed client's Options,
- // the Client's Retryer will be wrapped to use the operation's specific
+ // per operation call's retry max attempts. If specified in an operation call's
+ // functional options with a value that is different than the constructed client's
+ // Options, the Client's Retryer will be wrapped to use the operation's specific
// RetryMaxAttempts value.
RetryMaxAttempts int
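
The shortened comment above drops the stale sentences about the `Retryer` member; the remaining semantics are easiest to see at a call site. Below is a minimal sketch, assuming default AWS config and credentials are available, of a per-operation override: a non-zero `RetryMaxAttempts` passed in an operation's functional options wraps the client's Retryer for that call only, leaving the client itself untouched.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg) // client keeps its own Retryer and RetryMaxAttempts

	// A RetryMaxAttempts that differs from the client's wraps the client's
	// Retryer for this ListKeys call only.
	if _, err := client.ListKeys(ctx, &kms.ListKeysInput{}, func(o *kms.Options) {
		o.RetryMaxAttempts = 5
	}); err != nil {
		log.Fatal(err)
	}
}
```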
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go
index dec2d3ecbe0..3421fe7cb64 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go
@@ -1041,9 +1041,9 @@ func (e *UnsupportedOperationException) ErrorCode() string {
}
func (e *UnsupportedOperationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
-// The request was rejected because the ( XksKeyId ) is already associated with a
-// KMS key in this external key store. Each KMS key in an external key store must
-// be associated with a different external key.
+// The request was rejected because the ( XksKeyId ) is already associated with
+// another KMS key in this external key store. Each KMS key in an external key
+// store must be associated with a different external key.
type XksKeyAlreadyInUseException struct {
Message *string
@@ -1164,9 +1164,9 @@ func (e *XksProxyIncorrectAuthenticationCredentialException) ErrorFault() smithy
return smithy.FaultClient
}
-// The request was rejected because the Amazon VPC endpoint service configuration
-// does not fulfill the requirements for an external key store proxy. For details,
-// see the exception message.
+// The request was rejected because the external key store proxy is not configured
+// correctly. To identify the cause, see the error message that accompanies the
+// exception.
type XksProxyInvalidConfigurationException struct {
Message *string
@@ -1223,10 +1223,9 @@ func (e *XksProxyInvalidResponseException) ErrorCode() string {
}
func (e *XksProxyInvalidResponseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
-// The request was rejected because the concatenation of the XksProxyUriEndpoint
-// is already associated with an external key store in the Amazon Web Services
-// account and Region. Each external key store in an account and Region must use a
-// unique external key store proxy address.
+// The request was rejected because the XksProxyUriEndpoint is already associated
+// with another external key store in this Amazon Web Services Region. To identify
+// the cause, see the error message that accompanies the exception.
type XksProxyUriEndpointInUseException struct {
Message *string
@@ -1253,9 +1252,9 @@ func (e *XksProxyUriEndpointInUseException) ErrorCode() string {
func (e *XksProxyUriEndpointInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The request was rejected because the concatenation of the XksProxyUriEndpoint
-// and XksProxyUriPath is already associated with an external key store in the
-// Amazon Web Services account and Region. Each external key store in an account
-// and Region must use a unique external key store proxy API address.
+// and XksProxyUriPath is already associated with another external key store in
+// this Amazon Web Services Region. Each external key store in a Region must use a
+// unique external key store proxy API address.
type XksProxyUriInUseException struct {
Message *string
@@ -1312,9 +1311,9 @@ func (e *XksProxyUriUnreachableException) ErrorCode() string {
func (e *XksProxyUriUnreachableException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The request was rejected because the specified Amazon VPC endpoint service is
-// already associated with an external key store in the Amazon Web Services account
-// and Region. Each external key store in an Amazon Web Services account and Region
-// must use a different Amazon VPC endpoint service.
+// already associated with another external key store in this Amazon Web Services
+// Region. Each external key store in a Region must use a different Amazon VPC
+// endpoint service.
type XksProxyVpcEndpointServiceInUseException struct {
Message *string
@@ -1343,9 +1342,10 @@ func (e *XksProxyVpcEndpointServiceInUseException) ErrorFault() smithy.ErrorFaul
}
// The request was rejected because the Amazon VPC endpoint service configuration
-// does not fulfill the requirements for an external key store proxy. For details,
-// see the exception message and review the requirements for Amazon VPC endpoint
-// service connectivity for an external key store.
+// does not fulfill the requirements for an external key store. To identify the
+// cause, see the error message that accompanies the exception and review the
+// requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements)
+// for Amazon VPC endpoint service connectivity for an external key store.
type XksProxyVpcEndpointServiceInvalidConfigurationException struct {
Message *string
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
index 73621142dc0..f0a4e60daae 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
@@ -1,3 +1,45 @@
+# v1.20.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.19.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.19.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2024-01-18)
+
+* No change notes available for this release.
+
+# v1.18.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.18.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
# v1.18.2 (2023-12-01)
* **Bug Fix**: Correct wrapping of errors in authentication workflow.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
index 879dc35a11d..fff457735be 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
@@ -42,6 +42,8 @@ func New(options Options, optFns ...func(*Options)) *Client {
setResolvedDefaultsMode(&options)
+ resolveRetryer(&options)
+
resolveHTTPClient(&options)
resolveHTTPSignerV4(&options)
@@ -54,10 +56,12 @@ func New(options Options, optFns ...func(*Options)) *Client {
fn(&options)
}
- resolveRetryer(&options)
+ finalizeRetryMaxAttempts(&options)
ignoreAnonymousAuth(&options)
+ wrapWithAnonymousAuth(&options)
+
resolveAuthSchemes(&options)
client := &Client{
@@ -85,7 +89,7 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
fn(&options)
}
- finalizeRetryMaxAttemptOptions(&options, *c)
+ finalizeOperationRetryMaxAttempts(&options, *c)
finalizeClientEndpointResolverOptions(&options)
@@ -333,7 +337,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
-func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
@@ -349,17 +361,37 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
}
func addClientUserAgent(stack *middleware.Stack, options Options) error {
- if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)(stack); err != nil {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
return err
}
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)
if len(options.AppID) > 0 {
- return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack)
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
}
return nil
}
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
+
type HTTPSignerV4 interface {
SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
}
@@ -378,12 +410,48 @@ func newDefaultV4Signer(o Options) *v4.Signer {
})
}
-func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
- mo := retry.AddRetryMiddlewaresOptions{
- Retryer: o.Retryer,
- LogRetryAttempts: o.ClientLogMode.IsRetries(),
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ })
+ if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
}
- return retry.AddRetryMiddlewares(stack, mo)
+ return nil
}
// resolves dual-stack endpoint configuration
@@ -416,12 +484,18 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
return nil
}
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
- return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
- return awshttp.AddResponseErrorMiddleware(stack)
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
}
func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
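
The reordering in `New` — `resolveRetryer` runs before the caller's functional options, and `finalizeRetryMaxAttempts` runs after them — is what the v1.18.5 changelog entry above refers to: `o.Retryer` is already populated inside caller-supplied option functions, and a `RetryMaxAttempts` override is still honored afterwards. A minimal sketch of what that enables, assuming default config loading:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := sso.NewFromConfig(cfg, func(o *sso.Options) {
		// o.Retryer is the resolved default here (before this change it could be
		// nil at this point), so it can be wrapped; retry.AddWithMaxAttempts is
		// the same helper the generated finalizeRetryMaxAttempts uses.
		o.Retryer = retry.AddWithMaxAttempts(o.Retryer, 4)
	})
	_ = client
}
```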
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
index 436eadc8647..4b21e8b00a9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
@@ -83,22 +83,22 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -119,7 +119,7 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
index d81b067701c..e44da697c55 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
@@ -88,22 +88,22 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -124,7 +124,7 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
index 38f8472ae1c..2d7add067fa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
@@ -86,22 +86,22 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -122,7 +122,7 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
index 82e98a89417..3ee682d19e0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
@@ -78,22 +78,22 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -114,7 +114,7 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go
index f7702dd0d3b..3b28e825dd0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go
@@ -52,6 +52,34 @@ func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error
return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
}
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
// AuthResolverParameters contains the set of inputs necessary for auth scheme
// resolution.
type AuthResolverParameters struct {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
index d31380cf283..76521eec0e5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
@@ -216,6 +216,13 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) {
}
}
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
// EndpointParameters provides the parameters that influence how endpoints are
// resolved.
type EndpointParameters struct {
@@ -461,7 +468,7 @@ type endpointParamsBinder interface {
func bindEndpointParams(input interface{}, options Options) *EndpointParameters {
params := &EndpointParameters{}
- params.Region = aws.String(endpoints.MapFIPSRegion(options.Region))
+ params.Region = bindRegion(options.Region)
params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
params.Endpoint = options.BaseEndpoint
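The new bindRegion helper is what implements the changelog note about unset regions: an empty options.Region now binds a nil EndpointParameters.Region, so endpoint resolution fails with an explicit error instead of building a hostname around an empty string. A small sketch of that distinction, assuming the real aws-sdk-go-v2 aws package is on the module path (MapFIPSRegion is left out for brevity):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
)

// bindRegion mirrors the vendored helper: an unset region becomes nil
// instead of a pointer to the empty string.
func bindRegion(region string) *string {
    if region == "" {
        return nil
    }
    return aws.String(region)
}

func main() {
    fmt.Println(bindRegion(""))           // <nil> -> endpoint resolution fails fast
    fmt.Println(*bindRegion("us-east-1")) // us-east-1
}
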
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
index 53060bccf5e..62aba0d0552 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
@@ -25,6 +25,7 @@
"options.go",
"protocol_test.go",
"serializers.go",
+ "snapshot_test.go",
"types/errors.go",
"types/types.go",
"validators.go"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
index 7322f306ea5..250762b75ee 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -3,4 +3,4 @@
package sso
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.18.2"
+const goModuleVersion = "1.20.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
index f044afde47c..c8f7c09e46d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
@@ -283,6 +283,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "il-central-1",
},
},
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.me-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "me-central-1",
+ },
+ },
endpoints.EndpointKey{
Region: "me-south-1",
}: endpoints.Endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
index ddc5c0df369..5dee7e53f47 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
@@ -70,11 +70,9 @@ type Options struct {
// RetryMaxAttempts specifies the maximum number attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. If specified in an operation call's functional
- // options with a value that is different than the constructed client's Options,
- // the Client's Retryer will be wrapped to use the operation's specific
+ // per operation call's retry max attempts. If specified in an operation call's
+ // functional options with a value that is different than the constructed client's
+ // Options, the Client's Retryer will be wrapped to use the operation's specific
// RetryMaxAttempts value.
RetryMaxAttempts int
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
index 446559ca96d..13cffac4449 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
@@ -1,3 +1,45 @@
+# v1.23.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.22.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.22.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.7 (2024-01-16)
+
+* No change notes available for this release.
+
+# v1.21.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.21.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
# v1.21.2 (2023-12-01)
* **Bug Fix**: Correct wrapping of errors in authentication workflow.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
index 59239741a91..8dc643bb0c5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
@@ -42,6 +42,8 @@ func New(options Options, optFns ...func(*Options)) *Client {
setResolvedDefaultsMode(&options)
+ resolveRetryer(&options)
+
resolveHTTPClient(&options)
resolveHTTPSignerV4(&options)
@@ -54,10 +56,12 @@ func New(options Options, optFns ...func(*Options)) *Client {
fn(&options)
}
- resolveRetryer(&options)
+ finalizeRetryMaxAttempts(&options)
ignoreAnonymousAuth(&options)
+ wrapWithAnonymousAuth(&options)
+
resolveAuthSchemes(&options)
client := &Client{
@@ -85,7 +89,7 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
fn(&options)
}
- finalizeRetryMaxAttemptOptions(&options, *c)
+ finalizeOperationRetryMaxAttempts(&options, *c)
finalizeClientEndpointResolverOptions(&options)
@@ -333,7 +337,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
-func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
@@ -349,17 +361,37 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
}
func addClientUserAgent(stack *middleware.Stack, options Options) error {
- if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)(stack); err != nil {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
return err
}
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)
if len(options.AppID) > 0 {
- return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack)
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
}
return nil
}
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
+
type HTTPSignerV4 interface {
SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
}
@@ -378,12 +410,48 @@ func newDefaultV4Signer(o Options) *v4.Signer {
})
}
-func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
- mo := retry.AddRetryMiddlewaresOptions{
- Retryer: o.Retryer,
- LogRetryAttempts: o.ClientLogMode.IsRetries(),
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ })
+ if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
}
- return retry.AddRetryMiddlewares(stack, mo)
+ return nil
}
// resolves dual-stack endpoint configuration
@@ -416,12 +484,18 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
return nil
}
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
- return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
- return awshttp.AddResponseErrorMiddleware(stack)
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
}
func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
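With this reordering, resolveRetryer now runs before the caller's functional options, finalizeRetryMaxAttempts re-wraps the retryer at construction time when RetryMaxAttempts was set, and finalizeOperationRetryMaxAttempts only re-wraps per call when the value differs from the client's. A hedged usage sketch of how those two paths are exercised; the RegisterClient call and its inputs are illustrative only:

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }

    // Construction time: finalizeRetryMaxAttempts wraps the (possibly default)
    // Retryer so the caller's max-attempts setting is still respected even
    // though resolveRetryer now runs before the functional options.
    client := ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
        o.RetryMaxAttempts = 5
    })

    // Per operation: finalizeOperationRetryMaxAttempts only re-wraps the
    // Retryer when the per-call value differs from the client's configuration.
    _, err = client.RegisterClient(context.TODO(), &ssooidc.RegisterClientInput{
        ClientName: aws.String("example"), // illustrative values
        ClientType: aws.String("public"),
    }, func(o *ssooidc.Options) {
        o.RetryMaxAttempts = 2
    })
    if err != nil {
        log.Println(err)
    }
}
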
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
index 42464294141..63f1eeb1312 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
@@ -140,22 +140,22 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -176,7 +176,7 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
index ed4b98f7631..63409538940 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -164,25 +163,25 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -203,7 +202,7 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
index 7aee9049166..09f016ec1ef 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
@@ -98,22 +98,22 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack,
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -134,7 +134,7 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack,
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
index d30349e6b29..c568805b226 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
@@ -106,22 +106,22 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -142,7 +142,7 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go
index fe88367c4c2..40b3becb9f2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go
@@ -52,6 +52,34 @@ func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error
return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
}
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
// AuthResolverParameters contains the set of inputs necessary for auth scheme
// resolution.
type AuthResolverParameters struct {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
index 85b87089026..94e835e7115 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
@@ -216,6 +216,13 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) {
}
}
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
// EndpointParameters provides the parameters that influence how endpoints are
// resolved.
type EndpointParameters struct {
@@ -461,7 +468,7 @@ type endpointParamsBinder interface {
func bindEndpointParams(input interface{}, options Options) *EndpointParameters {
params := &EndpointParameters{}
- params.Region = aws.String(endpoints.MapFIPSRegion(options.Region))
+ params.Region = bindRegion(options.Region)
params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
params.Endpoint = options.BaseEndpoint
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
index 0a6b34935a2..62007829b60 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
@@ -25,6 +25,7 @@
"options.go",
"protocol_test.go",
"serializers.go",
+ "snapshot_test.go",
"types/errors.go",
"types/types.go",
"validators.go"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
index ee0a973640f..8c5f455b146 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -3,4 +3,4 @@
package ssooidc
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.21.2"
+const goModuleVersion = "1.23.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
index c48da8b88a6..cbd77fd291c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
@@ -283,6 +283,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "il-central-1",
},
},
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.me-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "me-central-1",
+ },
+ },
endpoints.EndpointKey{
Region: "me-south-1",
}: endpoints.Endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
index f338f7d6857..b964e7e1090 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
@@ -70,11 +70,9 @@ type Options struct {
// RetryMaxAttempts specifies the maximum number attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. If specified in an operation call's functional
- // options with a value that is different than the constructed client's Options,
- // the Client's Retryer will be wrapped to use the operation's specific
+ // per operation call's retry max attempts. If specified in an operation call's
+ // functional options with a value that is different than the constructed client's
+ // Options, the Client's Retryer will be wrapped to use the operation's specific
// RetryMaxAttempts value.
RetryMaxAttempts int
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
index 0704f0a5de8..0f4845a52e9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -1,3 +1,46 @@
+# v1.28.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.27.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.27.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.7 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2023-12-20)
+
+* No change notes available for this release.
+
+# v1.26.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.26.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+* **Bug Fix**: STS `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity` would incorrectly attempt to use SigV4 authentication.
+
# v1.26.2 (2023-12-01)
* **Bug Fix**: Correct wrapping of errors in authentication workflow.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
index 32c391e56e8..8b38a505ccf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
@@ -46,6 +46,8 @@ func New(options Options, optFns ...func(*Options)) *Client {
setResolvedDefaultsMode(&options)
+ resolveRetryer(&options)
+
resolveHTTPClient(&options)
resolveHTTPSignerV4(&options)
@@ -58,10 +60,12 @@ func New(options Options, optFns ...func(*Options)) *Client {
fn(&options)
}
- resolveRetryer(&options)
+ finalizeRetryMaxAttempts(&options)
ignoreAnonymousAuth(&options)
+ wrapWithAnonymousAuth(&options)
+
resolveAuthSchemes(&options)
client := &Client{
@@ -89,7 +93,7 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
fn(&options)
}
- finalizeRetryMaxAttemptOptions(&options, *c)
+ finalizeOperationRetryMaxAttempts(&options, *c)
finalizeClientEndpointResolverOptions(&options)
@@ -337,7 +341,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
-func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
@@ -353,17 +365,37 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
}
func addClientUserAgent(stack *middleware.Stack, options Options) error {
- if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)(stack); err != nil {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
return err
}
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)
if len(options.AppID) > 0 {
- return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack)
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
}
return nil
}
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
+
type HTTPSignerV4 interface {
SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
}
@@ -382,12 +414,48 @@ func newDefaultV4Signer(o Options) *v4.Signer {
})
}
-func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
- mo := retry.AddRetryMiddlewaresOptions{
- Retryer: o.Retryer,
- LogRetryAttempts: o.ClientLogMode.IsRetries(),
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ })
+ if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
}
- return retry.AddRetryMiddlewares(stack, mo)
+ return nil
}
// resolves dual-stack endpoint configuration
@@ -420,12 +488,18 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
return nil
}
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
- return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
- return awshttp.AddResponseErrorMiddleware(stack)
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
}
// HTTPPresignerV4 represents presigner interface used by presign url client
@@ -540,6 +614,12 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op
if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok {
stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID())
}
+ if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok {
+ stack.Finalize.Remove((*retry.Attempt)(nil).ID())
+ }
+ if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok {
+ stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID())
+ }
stack.Deserialize.Clear()
stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID())
stack.Build.Remove("UserAgent")
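The presign converter strips the newly explicit retry.Attempt and retry.MetricsHeader middlewares for the same reason it clears the rest of the transport stack: a presigned request is signed but never sent. A short usage sketch under the assumption that the standard STS presign client is used:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }

    // The presign converter builds and signs the request URL without sending
    // it, which is why the retry attempt and metrics-header middlewares are
    // removed from the Finalize step alongside the other transport middleware.
    presigner := sts.NewPresignClient(sts.NewFromConfig(cfg))
    req, err := presigner.PresignGetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(req.Method, req.URL)
}
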
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
index 2938dac8e38..e0e2c9c2e8d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
@@ -349,25 +349,25 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -388,7 +388,7 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
index ef576b6407d..2a57b72ac99 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
@@ -298,22 +298,22 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -334,7 +334,7 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
index b2f126b1d0d..98108ce6af0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
@@ -317,22 +317,22 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -353,7 +353,7 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
index 97a00b97da2..b4ad54ab2fa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -95,25 +94,25 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -134,7 +133,7 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
index e01fcebfe52..1f7cbcc2bbb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -88,25 +87,25 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -127,7 +126,7 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
index 80296940899..acb7ede44fd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
@@ -83,25 +83,25 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -119,7 +119,7 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
index efaba119c92..3679618cb5a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/sts/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -255,25 +254,25 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -294,7 +293,7 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
index 7b07435f225..751fb147d4b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
@@ -6,7 +6,6 @@ import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/sts/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -141,25 +140,25 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack,
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
@@ -177,7 +176,7 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack,
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil {
return err
}
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go
index 07d4dc55c59..9db5bfd4348 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go
@@ -52,6 +52,34 @@ func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error
return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
}
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
// AuthResolverParameters contains the set of inputs necessary for auth scheme
// resolution.
type AuthResolverParameters struct {
@@ -92,34 +120,12 @@ func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params
var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
"AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option {
return []*smithyauth.Option{
- {
- SchemeID: smithyauth.SchemeIDSigV4,
- SignerProperties: func() smithy.Properties {
- var props smithy.Properties
- smithyhttp.SetSigV4SigningName(&props, "sts")
- smithyhttp.SetSigV4SigningRegion(&props, params.Region)
-
- return props
- }(),
- },
-
{SchemeID: smithyauth.SchemeIDAnonymous},
}
},
"AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option {
return []*smithyauth.Option{
- {
- SchemeID: smithyauth.SchemeIDSigV4,
- SignerProperties: func() smithy.Properties {
- var props smithy.Properties
- smithyhttp.SetSigV4SigningName(&props, "sts")
- smithyhttp.SetSigV4SigningRegion(&props, params.Region)
-
- return props
- }(),
- },
-
{SchemeID: smithyauth.SchemeIDAnonymous},
}
},
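Removing the SigV4 option leaves AssumeRoleWithSAML and AssumeRoleWithWebIdentity anonymous-only, which matches the changelog note that these operations were incorrectly attempting SigV4 signing; the caller authenticates with the assertion or token itself rather than with AWS credentials. A sketch of an unsigned call, with a placeholder role ARN and token:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
    // AssumeRoleWithWebIdentity proves identity with the web identity token,
    // not with SigV4 credentials, so the anonymous auth scheme restored above
    // is the one that gets selected even when no credentials are configured.
    client := sts.New(sts.Options{Region: "us-east-1"})

    out, err := client.AssumeRoleWithWebIdentity(context.TODO(), &sts.AssumeRoleWithWebIdentityInput{
        RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"), // illustrative ARN
        RoleSessionName:  aws.String("example-session"),
        WebIdentityToken: aws.String("<oidc-token>"), // placeholder token
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.ToString(out.Credentials.AccessKeyId))
}
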
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
index 9f7932f9a06..32e2d5435f4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
@@ -217,6 +217,13 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) {
}
}
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
// EndpointParameters provides the parameters that influence how endpoints are
// resolved.
type EndpointParameters struct {
@@ -1041,7 +1048,7 @@ type endpointParamsBinder interface {
func bindEndpointParams(input interface{}, options Options) *EndpointParameters {
params := &EndpointParameters{}
- params.Region = aws.String(endpoints.MapFIPSRegion(options.Region))
+ params.Region = bindRegion(options.Region)
params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
params.Endpoint = options.BaseEndpoint
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
index d90b8bce4b4..54fac4bd5b7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
@@ -31,6 +31,7 @@
"options.go",
"protocol_test.go",
"serializers.go",
+ "snapshot_test.go",
"types/errors.go",
"types/types.go",
"validators.go"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
index 7fd1c74bd6a..06957274a3f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.26.2"
+const goModuleVersion = "1.28.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
index ca4c881909a..3dbd993b54b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
@@ -183,6 +183,9 @@ var defaultPartitions = endpoints.Partitions{
endpoints.EndpointKey{
Region: "ca-central-1",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-central-1",
}: endpoints.Endpoint{},
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
index 2c7a03701c8..5c1be79f8c0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
@@ -70,11 +70,9 @@ type Options struct {
// RetryMaxAttempts specifies the maximum number attempts an API client will call
// an operation that fails with a retryable error. A value of 0 is ignored, and
// will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. If specified in an operation call's functional
- // options with a value that is different than the constructed client's Options,
- // the Client's Retryer will be wrapped to use the operation's specific
+ // per operation call's retry max attempts. If specified in an operation call's
+ // functional options with a value that is different than the constructed client's
+ // Options, the Client's Retryer will be wrapped to use the operation's specific
// RetryMaxAttempts value.
RetryMaxAttempts int
diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore
index c92d6105eb3..2518b349154 100644
--- a/vendor/github.com/aws/smithy-go/.gitignore
+++ b/vendor/github.com/aws/smithy-go/.gitignore
@@ -24,3 +24,6 @@ build/
# VS Code
bin/
.vscode/
+
+# make
+c.out
diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md
index 8c83df00eb3..b8d6561a4e1 100644
--- a/vendor/github.com/aws/smithy-go/CHANGELOG.md
+++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md
@@ -1,3 +1,22 @@
+# Release (2024-02-21)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.1
+ * **Bug Fix**: Remove runtime dependency on go-cmp.
+
+# Release (2024-02-13)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.0
+ * **Feature**: Add codegen definition for sigv4a trait.
+ * **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# Release (2023-12-07)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.19.0
+ * **Feature**: Support modeled request compression.
+
# Release (2023-11-30)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile
index 4b3c209373c..e66fa8caceb 100644
--- a/vendor/github.com/aws/smithy-go/Makefile
+++ b/vendor/github.com/aws/smithy-go/Makefile
@@ -33,13 +33,18 @@ smithy-clean:
##################
# Linting/Verify #
##################
-.PHONY: verify vet
+.PHONY: verify vet cover
verify: vet
vet:
go vet ${BUILD_TAGS} --all ./...
+cover:
+ go test ${BUILD_TAGS} -coverprofile c.out ./...
+ @cover=`go tool cover -func c.out | grep '^total:' | awk '{ print $$3+0 }'`; \
+ echo "total (statements): $$cover%";
+
################
# Unit Testing #
################
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go
index bb04fc9e8fe..341392e10f8 100644
--- a/vendor/github.com/aws/smithy-go/go_module_metadata.go
+++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go
@@ -3,4 +3,4 @@
package smithy
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.18.1"
+const goModuleVersion = "1.20.1"
diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml
index 20295cdd2aa..9d94b7cbd0a 100644
--- a/vendor/github.com/aws/smithy-go/modman.toml
+++ b/vendor/github.com/aws/smithy-go/modman.toml
@@ -1,5 +1,4 @@
[dependencies]
- "github.com/google/go-cmp" = "v0.5.8"
"github.com/jmespath/go-jmespath" = "v0.4.0"
[modules]
diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go
new file mode 100644
index 00000000000..004d78f2136
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go
@@ -0,0 +1,30 @@
+package requestcompression
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+)
+
+func gzipCompress(input io.Reader) ([]byte, error) {
+ var b bytes.Buffer
+ w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create gzip writer, %v", err)
+ }
+
+ inBytes, err := io.ReadAll(input)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read payload to compress, %v", err)
+ }
+
+ if _, err = w.Write(inBytes); err != nil {
+ return nil, fmt.Errorf("failed to write payload to be compressed, %v", err)
+ }
+ if err = w.Close(); err != nil {
+ return nil, fmt.Errorf("failed to flush payload being compressed, %v", err)
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go
new file mode 100644
index 00000000000..06c16afc113
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go
@@ -0,0 +1,52 @@
+package requestcompression
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "net/http"
+)
+
+const captureUncompressedRequestID = "CaptureUncompressedRequest"
+
+// AddCaptureUncompressedRequestMiddleware captures the http request before it is compressed, so the uncompressed payload can be checked
+func AddCaptureUncompressedRequestMiddleware(stack *middleware.Stack, buf *bytes.Buffer) error {
+ return stack.Serialize.Insert(&captureUncompressedRequestMiddleware{
+ buf: buf,
+ }, "RequestCompression", middleware.Before)
+}
+
+type captureUncompressedRequestMiddleware struct {
+ req *http.Request
+ buf *bytes.Buffer
+ bytes []byte
+}
+
+// ID returns id of the captureUncompressedRequestMiddleware
+func (*captureUncompressedRequestMiddleware) ID() string {
+ return captureUncompressedRequestID
+}
+
+// HandleSerialize captures request payload before it is compressed by request compression middleware
+func (m *captureUncompressedRequestMiddleware) HandleSerialize(ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ output middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return output, metadata, fmt.Errorf("error when retrieving http request")
+ }
+
+ _, err = io.Copy(m.buf, request.GetStream())
+ if err != nil {
+ return output, metadata, fmt.Errorf("error when copying http request stream: %q", err)
+ }
+ if err = request.RewindStream(); err != nil {
+ return output, metadata, fmt.Errorf("error when rewinding request stream: %q", err)
+ }
+
+ return next.HandleSerialize(ctx, input)
+}
diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go
new file mode 100644
index 00000000000..7c41476039d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go
@@ -0,0 +1,103 @@
+// Package requestcompression implements runtime support for smithy-modeled
+// request compression.
+//
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package requestcompression
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
+const MaxRequestMinCompressSizeBytes = 10485760
+
+// Enumeration values for supported compress Algorithms.
+const (
+ GZIP = "gzip"
+)
+
+type compressFunc func(io.Reader) ([]byte, error)
+
+var allowedAlgorithms = map[string]compressFunc{
+ GZIP: gzipCompress,
+}
+
+// AddRequestCompression adds the requestCompression middleware to the operation stack
+func AddRequestCompression(stack *middleware.Stack, disabled bool, minBytes int64, algorithms []string) error {
+ return stack.Serialize.Add(&requestCompression{
+ disableRequestCompression: disabled,
+ requestMinCompressSizeBytes: minBytes,
+ compressAlgorithms: algorithms,
+ }, middleware.After)
+}
+
+type requestCompression struct {
+ disableRequestCompression bool
+ requestMinCompressSizeBytes int64
+ compressAlgorithms []string
+}
+
+// ID returns the ID of the middleware
+func (m requestCompression) ID() string {
+ return "RequestCompression"
+}
+
+// HandleSerialize gzip-compresses the request's stream/body if enabled by the config fields
+func (m requestCompression) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.disableRequestCompression {
+ return next.HandleSerialize(ctx, in)
+ }
+ // still need to check requestMinCompressSizeBytes in case it is out of range after service client config
+ if m.requestMinCompressSizeBytes < 0 || m.requestMinCompressSizeBytes > MaxRequestMinCompressSizeBytes {
+ return out, metadata, fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", m.requestMinCompressSizeBytes)
+ }
+
+ req, ok := in.Request.(*http.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ for _, algorithm := range m.compressAlgorithms {
+ compressFunc := allowedAlgorithms[algorithm]
+ if compressFunc != nil {
+ if stream := req.GetStream(); stream != nil {
+ size, found, err := req.StreamLength()
+ if err != nil {
+ return out, metadata, fmt.Errorf("error while finding request stream length, %v", err)
+ } else if !found || size < m.requestMinCompressSizeBytes {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ compressedBytes, err := compressFunc(stream)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to compress request stream, %v", err)
+ }
+
+ var newReq *http.Request
+ if newReq, err = req.SetStream(bytes.NewReader(compressedBytes)); err != nil {
+ return out, metadata, fmt.Errorf("failed to set request stream, %v", err)
+ }
+ *req = *newReq
+
+ if val := req.Header.Get("Content-Encoding"); val != "" {
+ req.Header.Set("Content-Encoding", fmt.Sprintf("%s, %s", val, algorithm))
+ } else {
+ req.Header.Set("Content-Encoding", algorithm)
+ }
+ }
+ break
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
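+
+// Usage sketch (assumption: a hand-assembled operation stack; the generated
+// AWS SDK clients register this middleware from service client configuration
+// instead):
+//
+//	stack := middleware.NewStack("example", http.NewStackRequest)
+//	err := AddRequestCompression(stack, false, 10240, []string{GZIP})
+//	// Request bodies of at least 10240 bytes are gzip-compressed and the
+//	// Content-Encoding header is set to "gzip".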
diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt
new file mode 100644
index 00000000000..e028b46a9b0
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile
new file mode 100644
index 00000000000..4e12afdd90d
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/Makefile
@@ -0,0 +1,10 @@
+.PHONY: ci generate clean
+
+ci: clean generate
+ go test -race -v ./...
+
+generate:
+ go generate .
+
+clean:
+ rm -rf *_generated*.go
diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md
new file mode 100644
index 00000000000..cf6b42f3d77
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/README.md
@@ -0,0 +1,95 @@
+# httpsnoop
+
+Package httpsnoop provides an easy way to capture http related metrics (i.e.
+response time, bytes written, and http status code) from your application's
+http.Handlers.
+
+Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
+which is also exposed for users interested in a more low-level API.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop)
+[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
+
+## Usage Example
+
+```go
+// myH is your app's http handler, perhaps a http.ServeMux or similar.
+var myH http.Handler
+// wrappedH wraps myH in order to log every request.
+wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ m := httpsnoop.CaptureMetrics(myH, w, r)
+ log.Printf(
+ "%s %s (code=%d dt=%s written=%d)",
+ r.Method,
+ r.URL,
+ m.Code,
+ m.Duration,
+ m.Written,
+ )
+})
+http.ListenAndServe(":8080", wrappedH)
+```
+
+## Why this package exists
+
+Instrumenting an application's http.Handler is surprisingly difficult.
+
+However, if you google for e.g. "capture ResponseWriter status code" you'll find
+lots of advice and code examples that suggest it is a fairly trivial
+undertaking. Unfortunately everything I've seen so far has a high chance of
+breaking your application.
+
+The main problem is that a `http.ResponseWriter` often implements additional
+interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
+`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
+in your own struct that also implements the `http.ResponseWriter` interface
+will hide the additional interfaces mentioned above. This has a high chance of
+introducing subtle bugs into any non-trivial application.
+
+Another approach I've seen people take is to return a struct that implements
+all of the interfaces above. However, that's also problematic, because it's
+difficult to fake some of these interfaces' behaviors when the underlying
+`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
+because an application may choose to operate differently, merely because it
+detects the presence of these additional interfaces.
+
+This package solves this problem by checking which additional interfaces a
+`http.ResponseWriter` implements, returning a wrapped version implementing the
+exact same set of interfaces.
+
+Additionally this package properly handles edge cases such as `WriteHeader` not
+being called, or called more than once, as well as concurrent calls to
+`http.ResponseWriter` methods, and even calls happening after the wrapped
+`ServeHTTP` has already returned.
+
+Unfortunately this package is not perfect either. It's possible that it is
+still missing some interfaces provided by the Go core (let me know if you find
+one), and it won't work for applications adding their own interfaces into the
+mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
+`http.ResponseWriter` and type-assert the result to its other interfaces.
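+
+A minimal sketch (assuming `w` was wrapped by `CaptureMetrics` or `Wrap` from
+this package):
+
+```go
+if f, ok := httpsnoop.Unwrap(w).(http.Flusher); ok {
+ // The underlying ResponseWriter supports flushing.
+ f.Flush()
+}
+```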
+
+However, hopefully the explanation above has sufficiently scared you away from
+rolling your own solution to this problem. httpsnoop may still break your application,
+but at least it tries to avoid it as much as possible.
+
+Anyway, the real problem here is that smuggling additional interfaces inside
+`http.ResponseWriter` is a problematic design choice, but it probably goes as
+deep as the Go language specification itself. But that's okay, I still prefer
+Go over the alternatives ;).
+
+## Performance
+
+```
+BenchmarkBaseline-8 20000 94912 ns/op
+BenchmarkCaptureMetrics-8 20000 95461 ns/op
+```
+
+As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
+overhead of ~500 ns per http request on my machine. However, the margin of
+error appears to be larger than that, therefore it should be reasonable to
+assume that the overhead introduced by `CaptureMetrics` is absolutely
+negligible.
+
+## License
+
+MIT
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
new file mode 100644
index 00000000000..bec7b71b39c
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -0,0 +1,86 @@
+package httpsnoop
+
+import (
+ "io"
+ "net/http"
+ "time"
+)
+
+// Metrics holds metrics captured from CaptureMetrics.
+type Metrics struct {
+ // Code is the first http response code passed to the WriteHeader func of
+ // the ResponseWriter. If no such call is made, a default code of 200 is
+ // assumed instead.
+ Code int
+ // Duration is the time it took to execute the handler.
+ Duration time.Duration
+ // Written is the number of bytes successfully written by the Write or
+ // ReadFrom function of the ResponseWriter. ResponseWriters may also write
+ // data to their underlying connection directly (e.g. headers), but those
+ // are not tracked. Therefore the number of Written bytes will usually match
+ // the size of the response body.
+ Written int64
+}
+
+// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
+// returns the metrics it captured from it.
+func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
+ return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
+ hnd.ServeHTTP(ww, r)
+ })
+}
+
+// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
+// resulting metrics. This is very similar to CaptureMetrics (which is just
+// sugar on top of this func), but is a more usable interface if your
+// application doesn't use the Go http.Handler interface.
+func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
+ m := Metrics{Code: http.StatusOK}
+ m.CaptureMetrics(w, fn)
+ return m
+}
+
+// CaptureMetrics wraps w and calls fn with the wrapped w and updates
+// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
+// but allows one to customize the starting Metrics object.
+func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) {
+ var (
+ start = time.Now()
+ headerWritten bool
+ hooks = Hooks{
+ WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
+ return func(code int) {
+ next(code)
+
+ if !(code >= 100 && code <= 199) && !headerWritten {
+ m.Code = code
+ headerWritten = true
+ }
+ }
+ },
+
+ Write: func(next WriteFunc) WriteFunc {
+ return func(p []byte) (int, error) {
+ n, err := next(p)
+
+ m.Written += int64(n)
+ headerWritten = true
+ return n, err
+ }
+ },
+
+ ReadFrom: func(next ReadFromFunc) ReadFromFunc {
+ return func(src io.Reader) (int64, error) {
+ n, err := next(src)
+
+ headerWritten = true
+ m.Written += n
+ return n, err
+ }
+ },
+ }
+ )
+
+ fn(Wrap(w, hooks))
+ m.Duration += time.Since(start)
+}
diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go
new file mode 100644
index 00000000000..203c35b3c6d
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/docs.go
@@ -0,0 +1,10 @@
+// Package httpsnoop provides an easy way to capture http related metrics (i.e.
+// response time, bytes written, and http status code) from your application's
+// http.Handlers.
+//
+// Doing this requires non-trivial wrapping of the http.ResponseWriter
+// interface, which is also exposed for users interested in a more low-level
+// API.
+package httpsnoop
+
+//go:generate go run codegen/main.go
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
new file mode 100644
index 00000000000..101cedde674
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
@@ -0,0 +1,436 @@
+// +build go1.8
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
+
+package httpsnoop
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// HeaderFunc is part of the http.ResponseWriter interface.
+type HeaderFunc func() http.Header
+
+// WriteHeaderFunc is part of the http.ResponseWriter interface.
+type WriteHeaderFunc func(code int)
+
+// WriteFunc is part of the http.ResponseWriter interface.
+type WriteFunc func(b []byte) (int, error)
+
+// FlushFunc is part of the http.Flusher interface.
+type FlushFunc func()
+
+// CloseNotifyFunc is part of the http.CloseNotifier interface.
+type CloseNotifyFunc func() <-chan bool
+
+// HijackFunc is part of the http.Hijacker interface.
+type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
+
+// ReadFromFunc is part of the io.ReaderFrom interface.
+type ReadFromFunc func(src io.Reader) (int64, error)
+
+// PushFunc is part of the http.Pusher interface.
+type PushFunc func(target string, opts *http.PushOptions) error
+
+// Hooks defines a set of method interceptors for methods included in
+// http.ResponseWriter as well as some others. You can think of them as
+// middleware for the function calls they target. See Wrap for more details.
+type Hooks struct {
+ Header func(HeaderFunc) HeaderFunc
+ WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
+ Write func(WriteFunc) WriteFunc
+ Flush func(FlushFunc) FlushFunc
+ CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
+ Hijack func(HijackFunc) HijackFunc
+ ReadFrom func(ReadFromFunc) ReadFromFunc
+ Push func(PushFunc) PushFunc
+}
+
+// Wrap returns a wrapped version of w that provides the exact same interface
+// as w. Specifically if w implements any combination of:
+//
+// - http.Flusher
+// - http.CloseNotifier
+// - http.Hijacker
+// - io.ReaderFrom
+// - http.Pusher
+//
+// The wrapped version will implement the exact same combination. If no hooks
+// are set, the wrapped version also behaves exactly as w. Hooks targeting
+// methods not supported by w are ignored. Any other hooks will intercept the
+// method they target and may modify the call's arguments and/or return values.
+// The CaptureMetrics implementation serves as a working example for how the
+// hooks can be used.
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
+ rw := &rw{w: w, h: hooks}
+ _, i0 := w.(http.Flusher)
+ _, i1 := w.(http.CloseNotifier)
+ _, i2 := w.(http.Hijacker)
+ _, i3 := w.(io.ReaderFrom)
+ _, i4 := w.(http.Pusher)
+ switch {
+ // combination 1/32
+ case !i0 && !i1 && !i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ }{rw, rw}
+ // combination 2/32
+ case !i0 && !i1 && !i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Pusher
+ }{rw, rw, rw}
+ // combination 3/32
+ case !i0 && !i1 && !i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 4/32
+ case !i0 && !i1 && !i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 5/32
+ case !i0 && !i1 && i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ }{rw, rw, rw}
+ // combination 6/32
+ case !i0 && !i1 && i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 7/32
+ case !i0 && !i1 && i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 8/32
+ case !i0 && !i1 && i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 9/32
+ case !i0 && i1 && !i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ }{rw, rw, rw}
+ // combination 10/32
+ case !i0 && i1 && !i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 11/32
+ case !i0 && i1 && !i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 12/32
+ case !i0 && i1 && !i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 13/32
+ case !i0 && i1 && i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 14/32
+ case !i0 && i1 && i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 15/32
+ case !i0 && i1 && i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 16/32
+ case !i0 && i1 && i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 17/32
+ case i0 && !i1 && !i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ }{rw, rw, rw}
+ // combination 18/32
+ case i0 && !i1 && !i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Pusher
+ }{rw, rw, rw, rw}
+ // combination 19/32
+ case i0 && !i1 && !i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 20/32
+ case i0 && !i1 && !i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 21/32
+ case i0 && !i1 && i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 22/32
+ case i0 && !i1 && i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 23/32
+ case i0 && !i1 && i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 24/32
+ case i0 && !i1 && i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 25/32
+ case i0 && i1 && !i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ }{rw, rw, rw, rw}
+ // combination 26/32
+ case i0 && i1 && !i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Pusher
+ }{rw, rw, rw, rw, rw}
+ // combination 27/32
+ case i0 && i1 && !i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 28/32
+ case i0 && i1 && !i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 29/32
+ case i0 && i1 && i2 && !i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw, rw}
+ // combination 30/32
+ case i0 && i1 && i2 && !i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 31/32
+ case i0 && i1 && i2 && i3 && !i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw, rw}
+ // combination 32/32
+ case i0 && i1 && i2 && i3 && i4:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ http.Pusher
+ }{rw, rw, rw, rw, rw, rw, rw}
+ }
+ panic("unreachable")
+}
+
+type rw struct {
+ w http.ResponseWriter
+ h Hooks
+}
+
+func (w *rw) Unwrap() http.ResponseWriter {
+ return w.w
+}
+
+func (w *rw) Header() http.Header {
+ f := w.w.(http.ResponseWriter).Header
+ if w.h.Header != nil {
+ f = w.h.Header(f)
+ }
+ return f()
+}
+
+func (w *rw) WriteHeader(code int) {
+ f := w.w.(http.ResponseWriter).WriteHeader
+ if w.h.WriteHeader != nil {
+ f = w.h.WriteHeader(f)
+ }
+ f(code)
+}
+
+func (w *rw) Write(b []byte) (int, error) {
+ f := w.w.(http.ResponseWriter).Write
+ if w.h.Write != nil {
+ f = w.h.Write(f)
+ }
+ return f(b)
+}
+
+func (w *rw) Flush() {
+ f := w.w.(http.Flusher).Flush
+ if w.h.Flush != nil {
+ f = w.h.Flush(f)
+ }
+ f()
+}
+
+func (w *rw) CloseNotify() <-chan bool {
+ f := w.w.(http.CloseNotifier).CloseNotify
+ if w.h.CloseNotify != nil {
+ f = w.h.CloseNotify(f)
+ }
+ return f()
+}
+
+func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ f := w.w.(http.Hijacker).Hijack
+ if w.h.Hijack != nil {
+ f = w.h.Hijack(f)
+ }
+ return f()
+}
+
+func (w *rw) ReadFrom(src io.Reader) (int64, error) {
+ f := w.w.(io.ReaderFrom).ReadFrom
+ if w.h.ReadFrom != nil {
+ f = w.h.ReadFrom(f)
+ }
+ return f(src)
+}
+
+func (w *rw) Push(target string, opts *http.PushOptions) error {
+ f := w.w.(http.Pusher).Push
+ if w.h.Push != nil {
+ f = w.h.Push(f)
+ }
+ return f(target, opts)
+}
+
+type Unwrapper interface {
+ Unwrap() http.ResponseWriter
+}
+
+// Unwrap returns the underlying http.ResponseWriter from within zero or more
+// layers of httpsnoop wrappers.
+func Unwrap(w http.ResponseWriter) http.ResponseWriter {
+ if rw, ok := w.(Unwrapper); ok {
+ // recurse until rw.Unwrap() returns a non-Unwrapper
+ return Unwrap(rw.Unwrap())
+ } else {
+ return w
+ }
+}
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
new file mode 100644
index 00000000000..e0951df1527
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
@@ -0,0 +1,278 @@
+// +build !go1.8
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
+
+package httpsnoop
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// HeaderFunc is part of the http.ResponseWriter interface.
+type HeaderFunc func() http.Header
+
+// WriteHeaderFunc is part of the http.ResponseWriter interface.
+type WriteHeaderFunc func(code int)
+
+// WriteFunc is part of the http.ResponseWriter interface.
+type WriteFunc func(b []byte) (int, error)
+
+// FlushFunc is part of the http.Flusher interface.
+type FlushFunc func()
+
+// CloseNotifyFunc is part of the http.CloseNotifier interface.
+type CloseNotifyFunc func() <-chan bool
+
+// HijackFunc is part of the http.Hijacker interface.
+type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
+
+// ReadFromFunc is part of the io.ReaderFrom interface.
+type ReadFromFunc func(src io.Reader) (int64, error)
+
+// Hooks defines a set of method interceptors for methods included in
+// http.ResponseWriter as well as some others. You can think of them as
+// middleware for the function calls they target. See Wrap for more details.
+type Hooks struct {
+ Header func(HeaderFunc) HeaderFunc
+ WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
+ Write func(WriteFunc) WriteFunc
+ Flush func(FlushFunc) FlushFunc
+ CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
+ Hijack func(HijackFunc) HijackFunc
+ ReadFrom func(ReadFromFunc) ReadFromFunc
+}
+
+// Wrap returns a wrapped version of w that provides the exact same interface
+// as w. Specifically if w implements any combination of:
+//
+// - http.Flusher
+// - http.CloseNotifier
+// - http.Hijacker
+// - io.ReaderFrom
+//
+// The wrapped version will implement the exact same combination. If no hooks
+// are set, the wrapped version also behaves exactly as w. Hooks targeting
+// methods not supported by w are ignored. Any other hooks will intercept the
+// method they target and may modify the call's arguments and/or return values.
+// The CaptureMetrics implementation serves as a working example for how the
+// hooks can be used.
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
+ rw := &rw{w: w, h: hooks}
+ _, i0 := w.(http.Flusher)
+ _, i1 := w.(http.CloseNotifier)
+ _, i2 := w.(http.Hijacker)
+ _, i3 := w.(io.ReaderFrom)
+ switch {
+ // combination 1/16
+ case !i0 && !i1 && !i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ }{rw, rw}
+ // combination 2/16
+ case !i0 && !i1 && !i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ io.ReaderFrom
+ }{rw, rw, rw}
+ // combination 3/16
+ case !i0 && !i1 && i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ }{rw, rw, rw}
+ // combination 4/16
+ case !i0 && !i1 && i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 5/16
+ case !i0 && i1 && !i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ }{rw, rw, rw}
+ // combination 6/16
+ case !i0 && i1 && !i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 7/16
+ case !i0 && i1 && i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 8/16
+ case !i0 && i1 && i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 9/16
+ case i0 && !i1 && !i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ }{rw, rw, rw}
+ // combination 10/16
+ case i0 && !i1 && !i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ io.ReaderFrom
+ }{rw, rw, rw, rw}
+ // combination 11/16
+ case i0 && !i1 && i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ }{rw, rw, rw, rw}
+ // combination 12/16
+ case i0 && !i1 && i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 13/16
+ case i0 && i1 && !i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ }{rw, rw, rw, rw}
+ // combination 14/16
+ case i0 && i1 && !i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw}
+ // combination 15/16
+ case i0 && i1 && i2 && !i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ }{rw, rw, rw, rw, rw}
+ // combination 16/16
+ case i0 && i1 && i2 && i3:
+ return struct {
+ Unwrapper
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ io.ReaderFrom
+ }{rw, rw, rw, rw, rw, rw}
+ }
+ panic("unreachable")
+}
+
+type rw struct {
+ w http.ResponseWriter
+ h Hooks
+}
+
+func (w *rw) Unwrap() http.ResponseWriter {
+ return w.w
+}
+
+func (w *rw) Header() http.Header {
+ f := w.w.(http.ResponseWriter).Header
+ if w.h.Header != nil {
+ f = w.h.Header(f)
+ }
+ return f()
+}
+
+func (w *rw) WriteHeader(code int) {
+ f := w.w.(http.ResponseWriter).WriteHeader
+ if w.h.WriteHeader != nil {
+ f = w.h.WriteHeader(f)
+ }
+ f(code)
+}
+
+func (w *rw) Write(b []byte) (int, error) {
+ f := w.w.(http.ResponseWriter).Write
+ if w.h.Write != nil {
+ f = w.h.Write(f)
+ }
+ return f(b)
+}
+
+func (w *rw) Flush() {
+ f := w.w.(http.Flusher).Flush
+ if w.h.Flush != nil {
+ f = w.h.Flush(f)
+ }
+ f()
+}
+
+func (w *rw) CloseNotify() <-chan bool {
+ f := w.w.(http.CloseNotifier).CloseNotify
+ if w.h.CloseNotify != nil {
+ f = w.h.CloseNotify(f)
+ }
+ return f()
+}
+
+func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ f := w.w.(http.Hijacker).Hijack
+ if w.h.Hijack != nil {
+ f = w.h.Hijack(f)
+ }
+ return f()
+}
+
+func (w *rw) ReadFrom(src io.Reader) (int64, error) {
+ f := w.w.(io.ReaderFrom).ReadFrom
+ if w.h.ReadFrom != nil {
+ f = w.h.ReadFrom(f)
+ }
+ return f(src)
+}
+
+type Unwrapper interface {
+ Unwrap() http.ResponseWriter
+}
+
+// Unwrap returns the underlying http.ResponseWriter from within zero or more
+// layers of httpsnoop wrappers.
+func Unwrap(w http.ResponseWriter) http.ResponseWriter {
+ if rw, ok := w.(Unwrapper); ok {
+ // recurse until rw.Unwrap() returns a non-Unwrapper
+ return Unwrap(rw.Unwrap())
+ } else {
+ return w
+ }
+}
diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md
deleted file mode 100644
index 3305db0f653..00000000000
--- a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md
+++ /dev/null
@@ -1,10 +0,0 @@
-Serious about security
-======================
-
-Square recognizes the important contributions the security research community
-can make. We therefore encourage reporting security issues with the code
-contained in this repository.
-
-If you believe you have discovered a security vulnerability, please follow the
-guidelines at .
-
diff --git a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md
index 7820c2f4d78..7ae6cff9453 100644
--- a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md
+++ b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md
@@ -1,6 +1,23 @@
+# v3.0.2
+
+## Fixed
+
+ - DecryptMulti: handle decompression error (#19)
+
+## Changed
+
+ - jwe/CompactSerialize: improve performance (#67)
+ - Increase the default number of PBKDF2 iterations to 600k (#48)
+ - Return the proper algorithm for ECDSA keys (#45)
+
+## Added
+
+ - Add Thumbprint support for opaque signers (#38)
+
# v3.0.1
-Fixed:
+## Fixed
+
- Security issue: an attacker specifying a large "p2c" value can cause
JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md
index b90c7e5c6ba..57da65700cd 100644
--- a/vendor/github.com/go-jose/go-jose/v3/README.md
+++ b/vendor/github.com/go-jose/go-jose/v3/README.md
@@ -1,15 +1,18 @@
# Go JOSE
-[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
-[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
-[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
-[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose)
-[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt)
+[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
+[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v3)](https://github.com/go-jose/go-jose/actions)
Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. This includes support for JSON Web Encryption,
JSON Web Signature, and JSON Web Token standards.
+**Help Wanted!** If you'd like to help us develop this library, please reach
+out to css (at) css.bio. While I'm still working on keeping this maintained,
+I have limited time for in-depth development and could use some additional help.
+
**Disclaimer**: This library contains encryption software that is subject to
the U.S. Export Administration Regulations. You may not export, re-export,
transfer or download this code or any part of it in violation of any United
@@ -21,13 +24,13 @@ US maintained blocked list.
## Overview
The implementation follows the
-[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
-[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
-[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
Tables of supported algorithms are shown below. The library supports both
the compact and JWS/JWE JSON Serialization formats, and has optional support for
multiple recipients. It also comes with a small command-line utility
-([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
+([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util))
for dealing with JOSE messages in a shell.
**Note**: We use a forked version of the `encoding/json` package from the Go
@@ -38,29 +41,19 @@ libraries in other languages.
### Versions
-[Version 2](https://gopkg.in/go-jose/go-jose.v2)
-([branch](https://github.com/go-jose/go-jose/tree/v2),
-[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
-
- import "gopkg.in/go-jose/go-jose.v2"
-
[Version 3](https://github.com/go-jose/go-jose)
-([branch](https://github.com/go-jose/go-jose/tree/master),
-[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet):
+([branch](https://github.com/go-jose/go-jose/tree/v3),
+[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v3), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:
import "github.com/go-jose/go-jose/v3"
-All new feature development takes place on the `master` branch, which we are
-preparing to release as version 3 soon. Version 2 will continue to receive
-critical bug and security fixes. Note that starting with version 3 we are
-using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher.
-
-Version 1 (on the `v1` branch) is frozen and not supported anymore.
+The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
+are still usable but not actively developed anymore.
### Supported algorithms
See below for a table of supported algorithms. Algorithm identifiers match
-the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
standard where possible. The Godoc reference has a list of constants.
Key encryption | Algorithm identifier(s)
@@ -103,20 +96,20 @@ allows attaching a key id.
Algorithm(s) | Corresponding types
:------------------------- | -------------------------------
- RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
- ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
- EdDSA1 | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
+ RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
+ EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
AES, HMAC | []byte
1. Only available in version 2 or later of the package
## Examples
-[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
-[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3)
+[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt)
Examples can be found in the Godoc
reference for this package. The
-[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
+[`jose-util`](https://github.com/go-jose/go-jose/tree/v3/jose-util)
subdirectory also contains a small command-line utility which might be useful
as an example as well.
diff --git a/vendor/github.com/go-jose/go-jose/v3/SECURITY.md b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md
new file mode 100644
index 00000000000..2f18a75a822
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md
@@ -0,0 +1,13 @@
+# Security Policy
+This document explains how to contact the Let's Encrypt security team to report security vulnerabilities.
+
+## Supported Versions
+| Version | Supported |
+| ------- | ----------|
+| >= v3 | ✓ |
+| v2 | ✗ |
+| v1 | ✗ |
+
+## Reporting a vulnerability
+
+Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow up again with another email.
diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
index 78abc326830..d4d4961b240 100644
--- a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
+++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go
@@ -285,6 +285,9 @@ func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm
switch alg {
case RS256, RS384, RS512:
+ // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the
+ // random parameter is legacy and ignored, and it can be nil.
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1
out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
case PS256, PS384, PS512:
out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go
index 6901137e446..506d3b7b860 100644
--- a/vendor/github.com/go-jose/go-jose/v3/crypter.go
+++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go
@@ -21,7 +21,6 @@ import (
"crypto/rsa"
"errors"
"fmt"
- "reflect"
"github.com/go-jose/go-jose/v3/json"
)
@@ -76,14 +75,24 @@ type recipientKeyInfo struct {
type EncrypterOptions struct {
Compression CompressionAlgorithm
- // Optional map of additional keys to be inserted into the protected header
- // of a JWS object. Some specifications which make use of JWS like to insert
- // additional values here. All values must be JSON-serializable.
+ // Optional map of name/value pairs to be inserted into the protected
+ // header of a JWS object. Some specifications which make use of
+ // JWS require additional values here.
+ //
+ // Values will be serialized by [json.Marshal] and must be valid inputs to
+ // that function.
+ //
+ // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
ExtraHeaders map[HeaderKey]interface{}
}
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
-// if necessary. It returns itself and so can be used in a fluent style.
+// if necessary, and returns the updated EncrypterOptions.
+//
+// The v parameter will be serialized by [json.Marshal] and must be a valid
+// input to that function.
+//
+// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
if eo.ExtraHeaders == nil {
eo.ExtraHeaders = map[HeaderKey]interface{}{}
@@ -111,7 +120,17 @@ func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
// default of 100000 will be used for the count and a 128-bit random salt will
// be generated.
type Recipient struct {
- Algorithm KeyAlgorithm
+ Algorithm KeyAlgorithm
+ // Key must have one of these types:
+ // - ed25519.PublicKey
+ // - *ecdsa.PublicKey
+ // - *rsa.PublicKey
+ // - *JSONWebKey
+ // - JSONWebKey
+ // - []byte (a symmetric key)
+ // - Any type that satisfies the OpaqueKeyEncrypter interface
+ //
+ // The type of Key must match the value of Algorithm.
Key interface{}
KeyID string
PBES2Count int
@@ -150,16 +169,17 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions)
switch rcpt.Algorithm {
case DIRECT:
// Direct encryption mode must be treated differently
- if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
+ keyBytes, ok := rawKey.([]byte)
+ if !ok {
return nil, ErrUnsupportedKeyType
}
- if encrypter.cipher.keySize() != len(rawKey.([]byte)) {
+ if encrypter.cipher.keySize() != len(keyBytes) {
return nil, ErrInvalidKeySize
}
encrypter.keyGenerator = staticKeyGenerator{
- key: rawKey.([]byte),
+ key: keyBytes,
}
- recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
+ recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes)
recipientInfo.keyID = keyID
if rcpt.KeyID != "" {
recipientInfo.keyID = rcpt.KeyID
@@ -168,16 +188,16 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions)
return encrypter, nil
case ECDH_ES:
// ECDH-ES (w/o key wrapping) is similar to DIRECT mode
- typeOf := reflect.TypeOf(rawKey)
- if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
+ keyDSA, ok := rawKey.(*ecdsa.PublicKey)
+ if !ok {
return nil, ErrUnsupportedKeyType
}
encrypter.keyGenerator = ecKeyGenerator{
size: encrypter.cipher.keySize(),
algID: string(enc),
- publicKey: rawKey.(*ecdsa.PublicKey),
+ publicKey: keyDSA,
}
- recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
+ recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA)
recipientInfo.keyID = keyID
if rcpt.KeyID != "" {
recipientInfo.keyID = rcpt.KeyID
@@ -270,9 +290,8 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
recipient.keyID = encryptionKey.KeyID
return recipient, err
- }
- if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok {
- return newOpaqueKeyEncrypter(alg, encrypter)
+ case OpaqueKeyEncrypter:
+ return newOpaqueKeyEncrypter(alg, encryptionKey)
}
return recipientKeyInfo{}, ErrUnsupportedKeyType
}
@@ -300,11 +319,11 @@ func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
return newDecrypter(decryptionKey.Key)
case *JSONWebKey:
return newDecrypter(decryptionKey.Key)
+ case OpaqueKeyDecrypter:
+ return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil
+ default:
+ return nil, ErrUnsupportedKeyType
}
- if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok {
- return &opaqueKeyDecrypter{decrypter: okd}, nil
- }
- return nil, ErrUnsupportedKeyType
}
// Implementation of encrypt method producing a JWE object.
@@ -403,9 +422,24 @@ func (ctx *genericEncrypter) Options() EncrypterOptions {
}
}
-// Decrypt and validate the object and return the plaintext. Note that this
-// function does not support multi-recipient, if you desire multi-recipient
+// Decrypt and validate the object and return the plaintext. This
+// function does not support multi-recipient. If you desire multi-recipient
// decryption use DecryptMulti instead.
+//
+// The decryptionKey argument must contain a private or symmetric key
+// and must have one of these types:
+// - *ecdsa.PrivateKey
+// - *rsa.PrivateKey
+// - *JSONWebKey
+// - JSONWebKey
+// - *JSONWebKeySet
+// - JSONWebKeySet
+// - []byte (a symmetric key)
+// - string (a symmetric key)
+// - Any type that satisfies the OpaqueKeyDecrypter interface.
+//
+// Note that ed25519 is only available for signatures, not encryption, so is
+// not an option here.
func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
headers := obj.mergedHeaders(nil)
@@ -462,15 +496,21 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
// The "zip" header parameter may only be present in the protected header.
if comp := obj.protected.getCompression(); comp != "" {
plaintext, err = decompress(comp, plaintext)
+ if err != nil {
+ return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+ }
}
- return plaintext, err
+ return plaintext, nil
}
// DecryptMulti decrypts and validates the object and returns the plaintexts,
// with support for multiple recipients. It returns the index of the recipient
// for which the decryption was successful, the merged headers for that recipient,
// and the plaintext.
+//
+// The decryptionKey argument must have one of the types allowed for the
+// decryptionKey argument of Decrypt().
func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
globalHeaders := obj.mergedHeaders(nil)
@@ -532,7 +572,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
// The "zip" header parameter may only be present in the protected header.
if comp := obj.protected.getCompression(); comp != "" {
- plaintext, _ = decompress(comp, plaintext)
+ plaintext, err = decompress(comp, plaintext)
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+ }
}
sanitized, err := headers.sanitized()
diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go
index 71ec1c419b1..0ad40ca085f 100644
--- a/vendor/github.com/go-jose/go-jose/v3/doc.go
+++ b/vendor/github.com/go-jose/go-jose/v3/doc.go
@@ -15,13 +15,11 @@
*/
/*
-
Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. It implements encryption and signing based on
the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
Token support available in a sub-package. The library supports both the compact
and JWS/JWE JSON Serialization formats, and has optional support for multiple
recipients.
-
*/
package jose
diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go
index 968a42496e1..62f8b8ad318 100644
--- a/vendor/github.com/go-jose/go-jose/v3/encoding.go
+++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go
@@ -189,3 +189,36 @@ func base64URLDecode(value string) ([]byte, error) {
value = strings.TrimRight(value, "=")
return base64.RawURLEncoding.DecodeString(value)
}
+
+func base64EncodeLen(sl []byte) int {
+ return base64.RawURLEncoding.EncodedLen(len(sl))
+}
+
+func base64JoinWithDots(inputs ...[]byte) string {
+ if len(inputs) == 0 {
+ return ""
+ }
+
+ // Count of dots.
+ totalCount := len(inputs) - 1
+
+ for _, input := range inputs {
+ totalCount += base64EncodeLen(input)
+ }
+
+ out := make([]byte, totalCount)
+ startEncode := 0
+ for i, input := range inputs {
+ base64.RawURLEncoding.Encode(out[startEncode:], input)
+
+ if i == len(inputs)-1 {
+ continue
+ }
+
+ startEncode += base64EncodeLen(input)
+ out[startEncode] = '.'
+ startEncode++
+ }
+
+ return string(out)
+}
diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go
index 4dbc4146cf9..50634dd8478 100644
--- a/vendor/github.com/go-jose/go-jose/v3/json/decode.go
+++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go
@@ -75,14 +75,13 @@ import (
//
// The JSON null value unmarshals into an interface, map, pointer, or slice
// by setting that Go value to nil. Because null is often used in JSON to mean
-// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// “not present,” unmarshaling a JSON null into any other Go type has no effect
// on the value and produces no error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
-//
func Unmarshal(data []byte, v interface{}) error {
// Check for well-formedness.
// Avoids filling out half a data structure
diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go
index ea0a1361987..98de68ce1e9 100644
--- a/vendor/github.com/go-jose/go-jose/v3/json/encode.go
+++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go
@@ -58,6 +58,7 @@ import (
// becomes a member of the object unless
// - the field's tag is "-", or
// - the field is empty and its tag specifies the "omitempty" option.
+//
// The empty values are false, 0, any
// nil pointer or interface value, and any array, slice, map, or string of
// length zero. The object's default key string is the struct field name
@@ -65,28 +66,28 @@ import (
// the struct field's tag value is the key name, followed by an optional comma
// and options. Examples:
//
-// // Field is ignored by this package.
-// Field int `json:"-"`
+// // Field is ignored by this package.
+// Field int `json:"-"`
//
-// // Field appears in JSON as key "myName".
-// Field int `json:"myName"`
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
//
-// // Field appears in JSON as key "myName" and
-// // the field is omitted from the object if its value is empty,
-// // as defined above.
-// Field int `json:"myName,omitempty"`
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
//
-// // Field appears in JSON as key "Field" (the default), but
-// // the field is skipped if empty.
-// // Note the leading comma.
-// Field int `json:",omitempty"`
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
//
// The "string" option signals that a field is stored as JSON inside a
// JSON-encoded string. It applies only to fields of string, floating point,
// integer, or boolean types. This extra level of encoding is sometimes used
// when communicating with JavaScript programs:
//
-// Int64String int64 `json:",string"`
+// Int64String int64 `json:",string"`
//
// The key name will be used if it's a non-empty string consisting of
// only Unicode letters, digits, dollar signs, percent signs, hyphens,
@@ -133,7 +134,6 @@ import (
// JSON cannot represent cyclic data structures and Marshal does not
// handle them. Passing cyclic structures to Marshal will result in
// an infinite recursion.
-//
func Marshal(v interface{}) ([]byte, error) {
e := &encodeState{}
err := e.marshal(v)
diff --git a/vendor/github.com/go-jose/go-jose/v3/json/stream.go b/vendor/github.com/go-jose/go-jose/v3/json/stream.go
index 9b2b926b033..f03b171e6a4 100644
--- a/vendor/github.com/go-jose/go-jose/v3/json/stream.go
+++ b/vendor/github.com/go-jose/go-jose/v3/json/stream.go
@@ -240,7 +240,6 @@ var _ Unmarshaler = (*RawMessage)(nil)
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
-//
type Token interface{}
const (
diff --git a/vendor/github.com/go-jose/go-jose/v3/jwe.go b/vendor/github.com/go-jose/go-jose/v3/jwe.go
index bce30450437..4267ac75025 100644
--- a/vendor/github.com/go-jose/go-jose/v3/jwe.go
+++ b/vendor/github.com/go-jose/go-jose/v3/jwe.go
@@ -252,13 +252,13 @@ func (obj JSONWebEncryption) CompactSerialize() (string, error) {
serializedProtected := mustSerializeJSON(obj.protected)
- return fmt.Sprintf(
- "%s.%s.%s.%s.%s",
- base64.RawURLEncoding.EncodeToString(serializedProtected),
- base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey),
- base64.RawURLEncoding.EncodeToString(obj.iv),
- base64.RawURLEncoding.EncodeToString(obj.ciphertext),
- base64.RawURLEncoding.EncodeToString(obj.tag)), nil
+ return base64JoinWithDots(
+ serializedProtected,
+ obj.recipients[0].encryptedKey,
+ obj.iv,
+ obj.ciphertext,
+ obj.tag,
+ ), nil
}
// FullSerialize serializes an object using the full JSON serialization format.
diff --git a/vendor/github.com/go-jose/go-jose/v3/jwk.go b/vendor/github.com/go-jose/go-jose/v3/jwk.go
index 78ff5aca5b3..e4021959ab4 100644
--- a/vendor/github.com/go-jose/go-jose/v3/jwk.go
+++ b/vendor/github.com/go-jose/go-jose/v3/jwk.go
@@ -67,9 +67,21 @@ type rawJSONWebKey struct {
X5tSHA256 string `json:"x5t#S256,omitempty"`
}
-// JSONWebKey represents a public or private key in JWK format.
+// JSONWebKey represents a public or private key in JWK format. It can be
+// marshaled into JSON and unmarshaled from JSON.
type JSONWebKey struct {
- // Cryptographic key, can be a symmetric or asymmetric key.
+ // Key is the Go in-memory representation of this key. It must have one
+ // of these types:
+ // - ed25519.PublicKey
+ // - ed25519.PrivateKey
+ // - *ecdsa.PublicKey
+ // - *ecdsa.PrivateKey
+ // - *rsa.PublicKey
+ // - *rsa.PrivateKey
+ // - []byte (a symmetric key)
+ //
+ // When marshaling this JSONWebKey into JSON, the "kty" header parameter
+ // will be automatically set based on the type of this field.
Key interface{}
// Key identifier, parsed from `kid` header.
KeyID string
@@ -389,6 +401,8 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
input, err = rsaThumbprintInput(key.N, key.E)
case ed25519.PrivateKey:
input, err = edThumbprintInput(ed25519.PublicKey(key[32:]))
+ case OpaqueSigner:
+ return key.Public().Thumbprint(hash)
default:
return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
}
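
The expanded `JSONWebKey` docs spell out the accepted `Key` types and note that `kty` is derived automatically. A minimal sketch, assuming the `github.com/go-jose/go-jose/v3` import path (an Ed25519 public key yields `"kty":"OKP"`):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// Key may be any of the types listed in the JSONWebKey doc comment;
	// the "kty" parameter is filled in from the Go type when marshaling.
	jwk := jose.JSONWebKey{Key: pub, KeyID: "example-key", Use: "sig", Algorithm: "EdDSA"}
	out, err := json.Marshal(jwk)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```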
diff --git a/vendor/github.com/go-jose/go-jose/v3/jws.go b/vendor/github.com/go-jose/go-jose/v3/jws.go
index 865f16ad335..e37007dbb85 100644
--- a/vendor/github.com/go-jose/go-jose/v3/jws.go
+++ b/vendor/github.com/go-jose/go-jose/v3/jws.go
@@ -314,15 +314,18 @@ func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) {
return "", ErrNotSupported
}
- serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected))
- payload := ""
- signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)
+ serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
+ var payload []byte
if !detached {
- payload = base64.RawURLEncoding.EncodeToString(obj.payload)
+ payload = obj.payload
}
- return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil
+ return base64JoinWithDots(
+ serializedProtected,
+ payload,
+ obj.Signatures[0].Signature,
+ ), nil
}
// CompactSerialize serializes an object using the compact serialization format.
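
The rewritten `compactSerialize` still emits three base64url segments joined by dots, with the payload segment left empty in the detached case. A short sketch, assuming `DetachedCompactSerialize` remains the exported entry point for detached output:

```go
package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // 32-byte HMAC key
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte(`{"msg":"hi"}`))
	if err != nil {
		panic(err)
	}

	full, _ := obj.CompactSerialize()             // header.payload.signature
	detached, _ := obj.DetachedCompactSerialize() // header..signature (empty payload segment)
	fmt.Println(full)
	fmt.Println(detached)
}
```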
diff --git a/vendor/github.com/go-jose/go-jose/v3/jwt/claims.go b/vendor/github.com/go-jose/go-jose/v3/jwt/claims.go
index 286be1d2fe9..b2a8dc8d4d4 100644
--- a/vendor/github.com/go-jose/go-jose/v3/jwt/claims.go
+++ b/vendor/github.com/go-jose/go-jose/v3/jwt/claims.go
@@ -119,7 +119,7 @@ func (s Audience) MarshalJSON() ([]byte, error) {
return json.Marshal([]string(s))
}
-//Contains checks whether a given string is included in the Audience
+// Contains checks whether a given string is included in the Audience
func (s Audience) Contains(v string) bool {
for _, a := range s {
if a == v {
diff --git a/vendor/github.com/go-jose/go-jose/v3/jwt/doc.go b/vendor/github.com/go-jose/go-jose/v3/jwt/doc.go
index 4cf97b54e78..30b886ef0e9 100644
--- a/vendor/github.com/go-jose/go-jose/v3/jwt/doc.go
+++ b/vendor/github.com/go-jose/go-jose/v3/jwt/doc.go
@@ -15,8 +15,6 @@
*/
/*
-
Package jwt provides an implementation of the JSON Web Token standard.
-
*/
package jwt
diff --git a/vendor/github.com/go-jose/go-jose/v3/opaque.go b/vendor/github.com/go-jose/go-jose/v3/opaque.go
index fc3e8d2ef6e..68db085ef6b 100644
--- a/vendor/github.com/go-jose/go-jose/v3/opaque.go
+++ b/vendor/github.com/go-jose/go-jose/v3/opaque.go
@@ -121,7 +121,7 @@ func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipie
return oke.encrypter.encryptKey(cek, alg)
}
-//OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key.
+// OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key.
type OpaqueKeyDecrypter interface {
DecryptKey(encryptedKey []byte, header Header) ([]byte, error)
}
diff --git a/vendor/github.com/go-jose/go-jose/v3/shared.go b/vendor/github.com/go-jose/go-jose/v3/shared.go
index fc2505e0eb4..489a04e32aa 100644
--- a/vendor/github.com/go-jose/go-jose/v3/shared.go
+++ b/vendor/github.com/go-jose/go-jose/v3/shared.go
@@ -183,8 +183,13 @@ type Header struct {
// Unverified certificate chain parsed from x5c header.
certificates []*x509.Certificate
- // Any headers not recognised above get unmarshalled
- // from JSON in a generic manner and placed in this map.
+ // At parse time, each header parameter with a name other than "kid",
+ // "jwk", "alg", "nonce", or "x5c" will have its value passed to
+ // [json.Unmarshal] to unmarshal it into an interface value.
+ // The resulting value will be stored in this map, with the header
+ // parameter name as the key.
+ //
+ // [json.Unmarshal]: https://pkg.go.dev/encoding/json#Unmarshal
ExtraHeaders map[HeaderKey]interface{}
}
diff --git a/vendor/github.com/go-jose/go-jose/v3/signing.go b/vendor/github.com/go-jose/go-jose/v3/signing.go
index 81d55f58759..52f3d856040 100644
--- a/vendor/github.com/go-jose/go-jose/v3/signing.go
+++ b/vendor/github.com/go-jose/go-jose/v3/signing.go
@@ -40,6 +40,15 @@ type Signer interface {
}
// SigningKey represents an algorithm/key used to sign a message.
+//
+// Key must have one of these types:
+// - ed25519.PrivateKey
+// - *ecdsa.PrivateKey
+// - *rsa.PrivateKey
+// - *JSONWebKey
+// - JSONWebKey
+// - []byte (an HMAC key)
+// - Any type that satisfies the OpaqueSigner interface
type SigningKey struct {
Algorithm SignatureAlgorithm
Key interface{}
@@ -52,12 +61,22 @@ type SignerOptions struct {
// Optional map of additional keys to be inserted into the protected header
// of a JWS object. Some specifications which make use of JWS like to insert
- // additional values here. All values must be JSON-serializable.
+ // additional values here.
+ //
+ // Values will be serialized by [json.Marshal] and must be valid inputs to
+ // that function.
+ //
+ // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
ExtraHeaders map[HeaderKey]interface{}
}
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
-// if necessary. It returns itself and so can be used in a fluent style.
+// if necessary, and returns the updated SignerOptions.
+//
+// The v argument will be serialized by [json.Marshal] and must be a valid
+// input to that function.
+//
+// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
if so.ExtraHeaders == nil {
so.ExtraHeaders = map[HeaderKey]interface{}{}
@@ -173,11 +192,11 @@ func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
return newVerifier(verificationKey.Key)
case *JSONWebKey:
return newVerifier(verificationKey.Key)
+ case OpaqueVerifier:
+ return &opaqueVerifier{verifier: verificationKey}, nil
+ default:
+ return nil, ErrUnsupportedKeyType
}
- if ov, ok := verificationKey.(OpaqueVerifier); ok {
- return &opaqueVerifier{verifier: ov}, nil
- }
- return nil, ErrUnsupportedKeyType
}
func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
@@ -204,11 +223,11 @@ func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipient
return newJWKSigner(alg, signingKey)
case *JSONWebKey:
return newJWKSigner(alg, *signingKey)
+ case OpaqueSigner:
+ return newOpaqueSigner(alg, signingKey)
+ default:
+ return recipientSigInfo{}, ErrUnsupportedKeyType
}
- if signer, ok := signingKey.(OpaqueSigner); ok {
- return newOpaqueSigner(alg, signer)
- }
- return recipientSigInfo{}, ErrUnsupportedKeyType
}
func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
@@ -321,12 +340,21 @@ func (ctx *genericSigner) Options() SignerOptions {
}
// Verify validates the signature on the object and returns the payload.
-// This function does not support multi-signature, if you desire multi-sig
+// This function does not support multi-signature. If you desire multi-signature
// verification use VerifyMulti instead.
//
// Be careful when verifying signatures based on embedded JWKs inside the
// payload header. You cannot assume that the key received in a payload is
// trusted.
+//
+// The verificationKey argument must have one of these types:
+// - ed25519.PublicKey
+// - *ecdsa.PublicKey
+// - *rsa.PublicKey
+// - *JSONWebKey
+// - JSONWebKey
+// - []byte (an HMAC key)
+// - Any type that implements the OpaqueVerifier interface.
func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
err := obj.DetachedVerify(obj.payload, verificationKey)
if err != nil {
@@ -346,6 +374,9 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
// most cases, you will probably want to use Verify instead. DetachedVerify
// is only useful if you have a payload and signature that are separated from
// each other.
+//
+// The verificationKey argument must have one of the types allowed for the
+// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
key := tryJWKS(verificationKey, obj.headers()...)
verifier, err := newVerifier(key)
@@ -388,6 +419,9 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter
// returns the index of the signature that was verified, along with the signature
// object and the payload. We return the signature and index to guarantee that
// callers are getting the verified value.
+//
+// The verificationKey argument must have one of the types allowed for the
+// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
if err != nil {
@@ -405,6 +439,9 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa
// DetachedVerifyMulti is only useful if you have a payload and signature that are
// separated from each other, and the signature can have multiple signers at the
// same time.
+//
+// The verificationKey argument must have one of the types allowed for the
+// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
key := tryJWKS(verificationKey, obj.headers()...)
verifier, err := newVerifier(key)
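
A sketch of the verification-key types now listed on `Verify`: a raw `ed25519.PublicKey` and a `*JSONWebKey` wrapping the same key are interchangeable here:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.EdDSA, Key: priv}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte("hello"))
	if err != nil {
		panic(err)
	}

	// Verify with the raw public key...
	payload, err := obj.Verify(pub)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))

	// ...or with a *JSONWebKey wrapping the same key.
	payload, err = obj.Verify(&jose.JSONWebKey{Key: pub})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```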
diff --git a/vendor/github.com/go-jose/go-jose/v3/symmetric.go b/vendor/github.com/go-jose/go-jose/v3/symmetric.go
index 1ffd2708b21..10d8e19fd10 100644
--- a/vendor/github.com/go-jose/go-jose/v3/symmetric.go
+++ b/vendor/github.com/go-jose/go-jose/v3/symmetric.go
@@ -40,12 +40,17 @@ var RandReader = rand.Reader
const (
// RFC7518 recommends a minimum of 1,000 iterations:
- // https://tools.ietf.org/html/rfc7518#section-4.8.1.2
+ // - https://tools.ietf.org/html/rfc7518#section-4.8.1.2
+ //
// NIST recommends a minimum of 10,000:
- // https://pages.nist.gov/800-63-3/sp800-63b.html
- // 1Password uses 100,000:
- // https://support.1password.com/pbkdf2/
- defaultP2C = 100000
+ // - https://pages.nist.gov/800-63-3/sp800-63b.html
+ //
+ // 1Password increased in 2023 from 100,000 to 650,000:
+ // - https://support.1password.com/pbkdf2/
+ //
+ // OWASP recommended 600,000 in Dec 2022:
+ // - https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2
+ defaultP2C = 600000
// Default salt size: 128 bits
defaultP2SSize = 16
)
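
The raised `defaultP2C` only applies when no explicit iteration count is configured. A sketch, assuming the `PBES2Count` field on `jose.Recipient` still overrides the default as in upstream go-jose v3:

```go
package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	// Leaving PBES2Count at zero picks up the new 600,000 default;
	// setting it pins an explicit PBKDF2 iteration count instead.
	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{
			Algorithm:  jose.PBES2_HS256_A128KW,
			Key:        []byte("correct horse battery staple"),
			PBES2Count: 1_000_000,
		},
		nil,
	)
	if err != nil {
		panic(err)
	}
	obj, err := enc.Encrypt([]byte("secret"))
	if err != nil {
		panic(err)
	}
	msg, _ := obj.CompactSerialize()
	fmt.Println(msg)
}
```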
diff --git a/vendor/github.com/go-kit/log/README.md b/vendor/github.com/go-kit/log/README.md
index a0931951df8..8067794657c 100644
--- a/vendor/github.com/go-kit/log/README.md
+++ b/vendor/github.com/go-kit/log/README.md
@@ -1,5 +1,10 @@
# package log
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-kit/log.svg)](https://pkg.go.dev/github.com/go-kit/log)
+[![Go Report Card](https://goreportcard.com/badge/go-kit/log)](https://goreportcard.com/report/go-kit/log)
+[![GitHub Actions](https://github.com/go-kit/log/actions/workflows/test.yml/badge.svg)](https://github.com/go-kit/log/actions/workflows/test.yml)
+[![Coverage Status](https://coveralls.io/repos/github/go-kit/log/badge.svg?branch=main)](https://coveralls.io/github/go-kit/log?branch=main)
+
`package log` provides a minimal interface for structured logging in services.
It may be wrapped to encode conventions, enforce type-safety, provide leveled
logging, and so on. It can be used for both typical application log events,
diff --git a/vendor/github.com/go-kit/log/json_logger.go b/vendor/github.com/go-kit/log/json_logger.go
index 0cedbf82478..d0faed4f098 100644
--- a/vendor/github.com/go-kit/log/json_logger.go
+++ b/vendor/github.com/go-kit/log/json_logger.go
@@ -68,7 +68,7 @@ func safeString(str fmt.Stringer) (s string) {
if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
s = "NULL"
} else {
- panic(panicVal)
+ s = fmt.Sprintf("PANIC in String method: %v", panicVal)
}
}
}()
@@ -82,7 +82,7 @@ func safeError(err error) (s interface{}) {
if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
s = nil
} else {
- panic(panicVal)
+ s = fmt.Sprintf("PANIC in Error method: %v", panicVal)
}
}
}()
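
With this change a panicking `String` (or `Error`) method on a non-nil receiver is rendered as a value instead of re-panicking out of `Log`. A sketch, assuming the JSON logger still routes `fmt.Stringer` values through `safeString`:

```go
package main

import (
	"os"

	"github.com/go-kit/log"
)

type explosive struct{}

func (explosive) String() string { panic("boom") }

func main() {
	logger := log.NewJSONLogger(os.Stdout)
	// Previously this re-panicked; now the value is rendered as
	// "PANIC in String method: boom".
	_ = logger.Log("msg", "value rendering", "obj", explosive{})
}
```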
diff --git a/vendor/github.com/go-kit/log/level/doc.go b/vendor/github.com/go-kit/log/level/doc.go
index 505d307b11b..fd681dcf922 100644
--- a/vendor/github.com/go-kit/log/level/doc.go
+++ b/vendor/github.com/go-kit/log/level/doc.go
@@ -7,6 +7,17 @@
// logger = level.NewFilter(logger, level.AllowInfo()) // <--
// logger = log.With(logger, "ts", log.DefaultTimestampUTC)
//
+// It's also possible to configure log level from a string. For instance from
+// a flag, environment variable or configuration file.
+//
+// fs := flag.NewFlagSet("myprogram")
+// lvl := fs.String("log", "info", "debug, info, warn, error")
+//
+// var logger log.Logger
+// logger = log.NewLogfmtLogger(os.Stderr)
+// logger = level.NewFilter(logger, level.Allow(level.ParseDefault(*lvl, level.InfoValue()))) // <--
+// logger = log.With(logger, "ts", log.DefaultTimestampUTC)
+//
// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error
// helper methods to emit leveled log events.
//
diff --git a/vendor/github.com/go-kit/log/level/level.go b/vendor/github.com/go-kit/log/level/level.go
index c94756c6bea..c641d985524 100644
--- a/vendor/github.com/go-kit/log/level/level.go
+++ b/vendor/github.com/go-kit/log/level/level.go
@@ -1,6 +1,14 @@
package level
-import "github.com/go-kit/log"
+import (
+ "errors"
+ "strings"
+
+ "github.com/go-kit/log"
+)
+
+// ErrInvalidLevelString is returned whenever an invalid string is passed to Parse.
+var ErrInvalidLevelString = errors.New("invalid level string")
// Error returns a logger that includes a Key/ErrorValue pair.
func Error(logger log.Logger) log.Logger {
@@ -66,6 +74,22 @@ func (l *logger) Log(keyvals ...interface{}) error {
// Option sets a parameter for the leveled logger.
type Option func(*logger)
+// Allow the provided log level to pass.
+func Allow(v Value) Option {
+ switch v {
+ case debugValue:
+ return AllowDebug()
+ case infoValue:
+ return AllowInfo()
+ case warnValue:
+ return AllowWarn()
+ case errorValue:
+ return AllowError()
+ default:
+ return AllowNone()
+ }
+}
+
// AllowAll is an alias for AllowDebug.
func AllowAll() Option {
return AllowDebug()
@@ -100,6 +124,33 @@ func allowed(allowed level) Option {
return func(l *logger) { l.allowed = allowed }
}
+// Parse a string to its corresponding level value. Valid strings are "debug",
+// "info", "warn", and "error". Strings are normalized via strings.TrimSpace and
+// strings.ToLower.
+func Parse(level string) (Value, error) {
+ switch strings.TrimSpace(strings.ToLower(level)) {
+ case debugValue.name:
+ return debugValue, nil
+ case infoValue.name:
+ return infoValue, nil
+ case warnValue.name:
+ return warnValue, nil
+ case errorValue.name:
+ return errorValue, nil
+ default:
+ return nil, ErrInvalidLevelString
+ }
+}
+
+// ParseDefault calls Parse and returns the default Value on error.
+func ParseDefault(level string, def Value) Value {
+ v, err := Parse(level)
+ if err != nil {
+ return def
+ }
+ return v
+}
+
// ErrNotAllowed sets the error to return from Log when it squelches a log
// event disallowed by the configured Allow[Level] option. By default,
// ErrNotAllowed is nil; in this case the log event is squelched with no
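
A sketch combining the new `Parse`, `ParseDefault`, and `Allow` helpers, along the lines of the example added to the package docs (the `LOG_LEVEL` variable name is illustrative):

```go
package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	// "debug", "info", "warn", or "error"; anything else falls back to info.
	lvl := level.ParseDefault(os.Getenv("LOG_LEVEL"), level.InfoValue())

	var logger log.Logger
	logger = log.NewLogfmtLogger(os.Stderr)
	logger = level.NewFilter(logger, level.Allow(lvl))

	level.Debug(logger).Log("msg", "dropped unless LOG_LEVEL=debug")
	level.Info(logger).Log("msg", "kept at the default level")
}
```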
diff --git a/vendor/github.com/go-kit/log/staticcheck.conf b/vendor/github.com/go-kit/log/staticcheck.conf
new file mode 100644
index 00000000000..528438b97d2
--- /dev/null
+++ b/vendor/github.com/go-kit/log/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all"]
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
index ab593118131..8969526a6e5 100644
--- a/vendor/github.com/go-logr/logr/README.md
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -1,6 +1,7 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
without becoming coupled to a particular logging implementation. This is not
@@ -73,6 +74,30 @@ received:
If the Go standard library had defined an interface for logging, this project
probably would not be needed. Alas, here we are.
+When the Go developers started developing such an interface with
+[slog](https://github.com/golang/go/issues/56345), they adopted some of the
+logr design but also left out some parts and changed others:
+
+| Feature | logr | slog |
+|---------|------|------|
+| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
+| Low-level API | `LogSink` | `Handler` |
+| Stack unwinding | done by `LogSink` | done by `Logger` |
+| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
+| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
+| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
+| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
+| Passing logger via context | `NewContext`, `FromContext` | no API |
+| Adding a name to a logger | `WithName` | no API |
+| Modify verbosity of log entries in a call chain | `V` | no API |
+| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
+
+The high-level slog API is explicitly meant to be one of many different APIs
+that can be layered on top of a shared `slog.Handler`. logr is one such
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
+
### Inspiration
Before you consider this package, please read [this blog post by the
@@ -118,6 +143,103 @@ There are implementations for the following logging libraries:
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+## slog interoperability
+
+Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
+As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
+slog API.
+
+### Using a `logr.LogSink` as backend for slog
+
+Ideally, a logr sink implementation should support both logr and slog by
+implementing both the normal logr interface(s) and `SlogSink`. Because
+of a conflict in the parameters of the common `Enabled` method, it is [not
+possible to implement both slog.Handler and logr.Sink in the same
+type](https://github.com/golang/go/issues/59110).
+
+If both are supported, log calls can go from the high-level APIs to the backend
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
+convert back and forth without adding additional wrappers, with one exception:
+when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
+log calls.
+
+Such an implementation should also support values that implement specific
+interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
+`slog.GroupValue`). logr does not convert those.
+
+Not supporting slog has several drawbacks:
+- Recording source code locations works correctly if the handler gets called
+ through `slog.Logger`, but may be wrong in other cases. That's because a
+ `logr.Sink` does its own stack unwinding instead of using the program counter
+ provided by the high-level API.
+- slog levels <= 0 can be mapped to logr levels by negating the level without a
+ loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
+ used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
+ because logr does not support "more important than info" levels.
+- The slog group concept is supported by prefixing each key in a key/value
+ pair with the group names, separated by a dot. For structured output like
+ JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected.
+- The overhead is likely to be higher.
+
+These drawbacks are severe enough that applications using a mixture of slog and
+logr should switch to a different backend.
+
+### Using a `slog.Handler` as backend for logr
+
+Using a plain `slog.Handler` without support for logr works better than the
+other direction:
+- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
+ by negating them.
+- Stack unwinding is done by the `SlogSink` and the resulting program
+ counter is passed to the `slog.Handler`.
+- Names added via `Logger.WithName` are gathered and recorded in an additional
+ attribute with `logger` as key and the names separated by slash as value.
+- `Logger.Error` is turned into a log record with `slog.LevelError` as level
+ and an additional attribute with `err` as key, if an error was provided.
+
+The main drawback is that `logr.Marshaler` will not be supported. Types should
+ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
+with logr implementations without slog support is not important, then
+`slog.Valuer` is sufficient.
+
+### Context support for slog
+
+Storing a logger in a `context.Context` is not supported by
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
+
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
+
## FAQ
### Conceptual
@@ -241,7 +363,9 @@ Otherwise, you can start out with `0` as "you always want to see this",
Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
-info-type logs.)
+info-type logs). For reference, slog pre-defines -4 for debug logs
+(corresponds to 4 in logr), which matches what is
+[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
#### How do I choose my keys?
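
A sketch of the two interoperability directions described above, assuming Go 1.21+ and the `FromSlogHandler`/`ToSlogHandler` names introduced in this release:

```go
//go:build go1.21

package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// logr API in front of a slog.Handler backend.
	logrLogger := logr.FromSlogHandler(slog.NewJSONHandler(os.Stderr, nil))
	logrLogger.Info("hello from logr", "key", "value")

	// slog API in front of a logr.LogSink backend.
	sink := funcr.New(func(prefix, args string) { os.Stdout.WriteString(prefix + args + "\n") }, funcr.Options{})
	slogLogger := slog.New(logr.ToSlogHandler(sink))
	slogLogger.Info("hello from slog", "key", "value")
}
```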
diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md
new file mode 100644
index 00000000000..1ca756fc7b3
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/SECURITY.md
@@ -0,0 +1,18 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives us time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+You may submit the report in the following ways:
+
+- send an email to go-logr-security@googlegroups.com
+- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+We ask that you give us 90 days to work on a fix before public exposure.
diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go
new file mode 100644
index 00000000000..de8bcc3ad89
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
+// the value is always a Logger value. With Go >= 1.21, the value can be a
+// Logger value or a slog.Logger pointer.
+type contextKey struct{}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+ return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+ return true
+}
diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go
new file mode 100644
index 00000000000..f012f9a18e8
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_noslog.go
@@ -0,0 +1,49 @@
+//go:build !go1.21
+// +build !go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v, nil
+ }
+
+ return Logger{}, notFoundError{}
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v
+ }
+
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go
new file mode 100644
index 00000000000..065ef0b8280
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_slog.go
@@ -0,0 +1,83 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return Logger{}, notFoundError{}
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return v, nil
+ case *slog.Logger:
+ return FromSlogHandler(v.Handler()), nil
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
+func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return nil
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return slog.New(ToSlogHandler(v))
+ case *slog.Logger:
+ return v
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if logger, err := FromContext(ctx); err == nil {
+ return logger
+ }
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
+// provided slog.Logger.
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
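
A sketch of the shared context key in action, assuming Go 1.21+: a `*slog.Logger` stored with `NewContextWithSlogLogger` can be read back through either API:

```go
//go:build go1.21

package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	slogger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	ctx := logr.NewContextWithSlogLogger(context.Background(), slogger)

	// FromContext converts the stored *slog.Logger to a logr.Logger on the fly...
	logr.FromContextOrDiscard(ctx).Info("via logr", "key", "value")

	// ...while FromContextAsSlogLogger hands it back as-is.
	if back := logr.FromContextAsSlogLogger(ctx); back != nil {
		back.Info("via slog", "key", "value")
	}
}
```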
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index e52f0cd01e2..fb2f866f4b7 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -100,6 +100,11 @@ type Options struct {
// details, see docs for Go's time.Layout.
TimestampFormat string
+ // LogInfoLevel tells funcr what key to use to log the info level.
+ // If not specified, the info level will be logged as "level".
+ // If this is set to "", the info level will not be logged at all.
+ LogInfoLevel *string
+
// Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded.
@@ -116,17 +121,17 @@ type Options struct {
// Equivalent hooks are offered for key-value pairs saved via
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
// for user-provided pairs (see RenderArgsHook).
- RenderBuiltinsHook func(kvList []interface{}) []interface{}
+ RenderBuiltinsHook func(kvList []any) []any
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
// only called for key-value pairs saved via logr.Logger.WithValues. See
// RenderBuiltinsHook for more details.
- RenderValuesHook func(kvList []interface{}) []interface{}
+ RenderValuesHook func(kvList []any) []any
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
// called for key-value pairs passed directly to Info and Error. See
// RenderBuiltinsHook for more details.
- RenderArgsHook func(kvList []interface{}) []interface{}
+ RenderArgsHook func(kvList []any) []any
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
// that contains a struct, etc.) it may log. Every time it finds a struct,
@@ -163,7 +168,7 @@ func (l fnlogger) WithName(name string) logr.LogSink {
return &l
}
-func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
+func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
l.Formatter.AddValues(kvList)
return &l
}
@@ -173,12 +178,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
return &l
}
-func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
+func (l fnlogger) Info(level int, msg string, kvList ...any) {
prefix, args := l.FormatInfo(level, msg, kvList)
l.write(prefix, args)
}
-func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
+func (l fnlogger) Error(err error, msg string, kvList ...any) {
prefix, args := l.FormatError(err, msg, kvList)
l.write(prefix, args)
}
@@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth
}
+ if opts.LogInfoLevel == nil {
+ opts.LogInfoLevel = new(string)
+ *opts.LogInfoLevel = "level"
+ }
f := Formatter{
outputFormat: outfmt,
prefix: "",
@@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
- outputFormat outputFormat
- prefix string
- values []interface{}
- valuesStr string
- depth int
- opts *Options
+ outputFormat outputFormat
+ prefix string
+ values []any
+ valuesStr string
+ parentValuesStr string
+ depth int
+ opts *Options
+ group string // for slog groups
+ groupDepth int
}
// outputFormat indicates which outputFormat to use.
@@ -246,40 +258,69 @@ const (
)
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
-type PseudoStruct []interface{}
+type PseudoStruct []any
// render produces a log line, ready to use.
-func (f Formatter) render(builtins, args []interface{}) string {
+func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
- buf.WriteByte('{')
+ buf.WriteByte('{') // for the whole line
}
+
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
- if len(f.valuesStr) > 0 {
+
+ if f.parentValuesStr != "" {
if continuing {
- if f.outputFormat == outputJSON {
- buf.WriteByte(',')
- } else {
- buf.WriteByte(' ')
- }
+ buf.WriteByte(f.comma())
}
+ buf.WriteString(f.parentValuesStr)
continuing = true
+ }
+
+ groupDepth := f.groupDepth
+ if f.group != "" {
+ if f.valuesStr != "" || len(args) != 0 {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{') // for the group
+ continuing = false
+ } else {
+ // The group was empty
+ groupDepth--
+ }
+ }
+
+ if f.valuesStr != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
buf.WriteString(f.valuesStr)
+ continuing = true
}
+
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
+
+ for i := 0; i < groupDepth; i++ {
+ buf.WriteByte('}') // for the groups
+ }
+
if f.outputFormat == outputJSON {
- buf.WriteByte('}')
+ buf.WriteByte('}') // for the whole line
}
+
return buf.String()
}
@@ -292,15 +333,22 @@ func (f Formatter) render(builtins, args []interface{}) string {
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
-func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
+ copied := false
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
+ if !copied {
+ newList := make([]any, len(kvList))
+ copy(newList, kvList)
+ kvList = newList
+ copied = true
+ }
k = f.nonStringKey(kvList[i])
kvList[i] = k
}
@@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b
if i > 0 || continuing {
if f.outputFormat == outputJSON {
- buf.WriteByte(',')
+ buf.WriteByte(f.comma())
} else {
// In theory the format could be something we don't understand. In
// practice, we control it, so it won't be.
@@ -316,25 +364,36 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b
}
}
- if escapeKeys {
- buf.WriteString(prettyString(k))
- } else {
- // this is faster
- buf.WriteByte('"')
- buf.WriteString(k)
- buf.WriteByte('"')
- }
- if f.outputFormat == outputJSON {
- buf.WriteByte(':')
- } else {
- buf.WriteByte('=')
- }
+ buf.WriteString(f.quoted(k, escapeKeys))
+ buf.WriteByte(f.colon())
buf.WriteString(f.pretty(v))
}
return kvList
}
-func (f Formatter) pretty(value interface{}) string {
+func (f Formatter) quoted(str string, escape bool) string {
+ if escape {
+ return prettyString(str)
+ }
+ // this is faster
+ return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+ if f.outputFormat == outputJSON {
+ return ','
+ }
+ return ' '
+}
+
+func (f Formatter) colon() byte {
+ if f.outputFormat == outputJSON {
+ return ':'
+ }
+ return '='
+}
+
+func (f Formatter) pretty(value any) string {
return f.prettyWithFlags(value, 0, 0)
}
@@ -343,7 +402,7 @@ const (
)
// TODO: This is not fast. Most of the overhead goes here.
-func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
if depth > f.opts.MaxLogDepth {
return `"<max-log-depth-exceeded>"`
}
@@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
}
for i := 0; i < len(v); i += 2 {
if i > 0 {
- buf.WriteByte(',')
+ buf.WriteByte(f.comma())
}
k, _ := v[i].(string) // sanitize() above means no need to check success
// arbitrary keys might need escaping
buf.WriteString(prettyString(k))
- buf.WriteByte(':')
+ buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
}
if flags&flagRawStruct == 0 {
@@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
continue
}
if printComma {
- buf.WriteByte(',')
+ buf.WriteByte(f.comma())
}
printComma = true // if we got here, we are rendering a field
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
@@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
name = fld.Name
}
// field names can't contain characters which need escaping
- buf.WriteByte('"')
- buf.WriteString(name)
- buf.WriteByte('"')
- buf.WriteByte(':')
+ buf.WriteString(f.quoted(name, false))
+ buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
}
if flags&flagRawStruct == 0 {
@@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
- buf.WriteByte(',')
+ buf.WriteByte(f.comma())
}
e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
@@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
i := 0
for it.Next() {
if i > 0 {
- buf.WriteByte(',')
+ buf.WriteByte(f.comma())
}
// If a map key supports TextMarshaler, use it.
keystr := ""
@@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
}
}
buf.WriteString(keystr)
- buf.WriteByte(':')
+ buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++
}
@@ -614,7 +671,7 @@ func isEmpty(v reflect.Value) bool {
return false
}
-func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+func invokeMarshaler(m logr.Marshaler) (ret any) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
@@ -675,12 +732,12 @@ func (f Formatter) caller() Caller {
const noValue = "<no-value>"
-func (f Formatter) nonStringKey(v interface{}) string {
+func (f Formatter) nonStringKey(v any) string {
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}
// snippet produces a short snippet string of an arbitrary value.
-func (f Formatter) snippet(v interface{}) string {
+func (f Formatter) snippet(v any) string {
const snipLen = 16
snip := f.pretty(v)
@@ -693,7 +750,7 @@ func (f Formatter) snippet(v interface{}) string {
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
-func (f Formatter) sanitize(kvList []interface{}) []interface{} {
+func (f Formatter) sanitize(kvList []any) []any {
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
@@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []interface{}) []interface{} {
return kvList
}
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(group string) {
+ // Unnamed groups are just inlined.
+ if group == "" {
+ return
+ }
+
+ // Any saved values can no longer be changed.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ continuing := false
+
+ if f.parentValuesStr != "" {
+ buf.WriteString(f.parentValuesStr)
+ continuing = true
+ }
+
+ if f.group != "" && f.valuesStr != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{') // for the group
+ continuing = false
+ }
+
+ if f.valuesStr != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(f.valuesStr)
+ }
+
+ // NOTE: We don't close the scope here - that's done later, when a log line
+ // is actually rendered (because we have N scopes to close).
+
+ f.parentValuesStr = buf.String()
+
+ // Start collecting new values.
+ f.group = group
+ f.groupDepth++
+ f.valuesStr = ""
+ f.values = nil
+}
+
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
@@ -727,8 +831,8 @@ func (f Formatter) GetDepth() int {
// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
-func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
- args := make([]interface{}, 0, 64) // using a constant here impacts perf
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
@@ -740,15 +844,18 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref
if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller())
}
- args = append(args, "level", level, "msg", msg)
+ if key := *f.opts.LogInfoLevel; key != "" {
+ args = append(args, key, level)
+ }
+ args = append(args, "msg", msg)
return prefix, f.render(args, kvList)
}
// FormatError renders an Error log message into strings. The prefix will be
-// empty when no names were set (via AddNames), or when the output is
+// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
-func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
- args := make([]interface{}, 0, 64) // using a constant here impacts perf
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
@@ -761,12 +868,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre
args = append(args, "caller", f.caller())
}
args = append(args, "msg", msg)
- var loggableErr interface{}
+ var loggableErr any
if err != nil {
loggableErr = err.Error()
}
args = append(args, "error", loggableErr)
- return f.prefix, f.render(args, kvList)
+ return prefix, f.render(args, kvList)
}
// AddName appends the specified name. funcr uses '/' characters to separate
@@ -781,7 +888,7 @@ func (f *Formatter) AddName(name string) {
// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
-func (f *Formatter) AddValues(kvList []interface{}) {
+func (f *Formatter) AddValues(kvList []any) {
// Three slice args forces a copy.
n := len(f.values)
f.values = append(f.values[:n:n], kvList...)
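
A sketch of the new `LogInfoLevel` option, assuming `funcr.NewJSON` as the entry point; pointing the option at an empty string would drop the level key from info lines entirely:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	levelKey := "v" // rename the default "level" key; "" would omit it
	logger := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{
		LogInfoLevel: &levelKey,
		Verbosity:    1,
	})
	logger.V(1).Info("hello", "key", "value")
}
```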
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 00000000000..7bd84761e2d
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, kvList)
+ return true
+ })
+
+ if record.Level >= slog.LevelError {
+ l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, kvList)
+ }
+ l.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+ l.startGroup(name)
+ return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, grpKVs)
+ }
+ if attr.Key == "" {
+ // slog says we have to inline these
+ kvList = append(kvList, grpKVs...)
+ } else {
+ kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+ }
+ } else if attr.Key != "" {
+ kvList = append(kvList, attr.Key, attrVal.Any())
+ }
+
+ return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+ result := -level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
index e027aea3fd3..b4428e105b4 100644
--- a/vendor/github.com/go-logr/logr/logr.go
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -127,9 +127,9 @@ limitations under the License.
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
-// Calling methods with the null logger (Logger{}) as instance will crash
-// because it has no LogSink. Therefore this null logger should never be passed
-// around. For cases where passing a logger is optional, a pointer to Logger
+// The zero logger (= Logger{}) is identical to Discard() and discards all log
+// entries. Code that receives a Logger by value can simply call it, the methods
+// will never crash. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
// # Key Naming Conventions
@@ -207,10 +207,6 @@ limitations under the License.
// those.
package logr
-import (
- "context"
-)
-
// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users. Passing a nil sink will create
// a Logger which discards all log lines.
@@ -258,6 +254,12 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
+ // Some implementations of LogSink look at the caller in Enabled (e.g.
+ // different verbosity levels per package or file), but we only pass one
+ // CallDepth in (via Init). This means that all calls from Logger to the
+ // LogSink's Enabled, Info, and Error methods must have the same number of
+ // frames. In other words, Logger methods can't call other Logger methods
+ // which call these LogSink methods unless we do it the same in all paths.
return l.sink != nil && l.sink.Enabled(l.level)
}
@@ -267,11 +269,11 @@ func (l Logger) Enabled() bool {
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
-func (l Logger) Info(msg string, keysAndValues ...interface{}) {
+func (l Logger) Info(msg string, keysAndValues ...any) {
if l.sink == nil {
return
}
- if l.Enabled() {
+ if l.sink.Enabled(l.level) { // see comment in Enabled
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
@@ -289,7 +291,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
-func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+func (l Logger) Error(err error, msg string, keysAndValues ...any) {
if l.sink == nil {
return
}
@@ -314,9 +316,16 @@ func (l Logger) V(level int) Logger {
return l
}
+// GetV returns the verbosity level of the logger. If the logger's LogSink is
+// nil as in the Discard logger, this will always return 0.
+func (l Logger) GetV() int {
+ // 0 if l.sink nil because of the if check in V above.
+ return l.level
+}
+
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
-func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+func (l Logger) WithValues(keysAndValues ...any) Logger {
if l.sink == nil {
return l
}
@@ -397,45 +406,6 @@ func (l Logger) IsZero() bool {
return l.sink == nil
}
-// contextKey is how we find Loggers in a context.Context.
-type contextKey struct{}
-
-// FromContext returns a Logger from ctx or an error if no Logger is found.
-func FromContext(ctx context.Context) (Logger, error) {
- if v, ok := ctx.Value(contextKey{}).(Logger); ok {
- return v, nil
- }
-
- return Logger{}, notFoundError{}
-}
-
-// notFoundError exists to carry an IsNotFound method.
-type notFoundError struct{}
-
-func (notFoundError) Error() string {
- return "no logr.Logger was present"
-}
-
-func (notFoundError) IsNotFound() bool {
- return true
-}
-
-// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
-// returns a Logger that discards all log messages.
-func FromContextOrDiscard(ctx context.Context) Logger {
- if v, ok := ctx.Value(contextKey{}).(Logger); ok {
- return v
- }
-
- return Discard()
-}
-
-// NewContext returns a new Context, derived from ctx, which carries the
-// provided Logger.
-func NewContext(ctx context.Context, logger Logger) context.Context {
- return context.WithValue(ctx, contextKey{}, logger)
-}
-
// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {
@@ -467,15 +437,15 @@ type LogSink interface {
// The level argument is provided for optional logging. This method will
// only be called when Enabled(level) is true. See Logger.Info for more
// details.
- Info(level int, msg string, keysAndValues ...interface{})
+ Info(level int, msg string, keysAndValues ...any)
// Error logs an error, with the given message and key/value pairs as
// context. See Logger.Error for more details.
- Error(err error, msg string, keysAndValues ...interface{})
+ Error(err error, msg string, keysAndValues ...any)
// WithValues returns a new LogSink with additional key/value pairs. See
// Logger.WithValues for more details.
- WithValues(keysAndValues ...interface{}) LogSink
+ WithValues(keysAndValues ...any) LogSink
// WithName returns a new LogSink with the specified name appended. See
// Logger.WithName for more details.
@@ -546,5 +516,5 @@ type Marshaler interface {
// with exported fields
//
// It may return any value of any type.
- MarshalLog() interface{}
+ MarshalLog() any
}
diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go
new file mode 100644
index 00000000000..82d1ba49481
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/sloghandler.go
@@ -0,0 +1,192 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+type slogHandler struct {
+ // May be nil, in which case all logs get discarded.
+ sink LogSink
+ // Non-nil if sink is non-nil and implements SlogSink.
+ slogSink SlogSink
+
+ // groupPrefix collects values from WithGroup calls. It gets added as
+ // prefix to value keys when handling a log record.
+ groupPrefix string
+
+ // levelBias can be set when constructing the handler to influence the
+ // slog.Level of log records. A positive levelBias reduces the
+ // slog.Level value. slog has no API to influence this value after the
+ // handler got created, so it can only be set indirectly through
+ // Logger.V.
+ levelBias slog.Level
+}
+
+var _ slog.Handler = &slogHandler{}
+
+// groupSeparator is used to concatenate WithGroup names and attribute keys.
+const groupSeparator = "."
+
+// GetLevel is used for black box unit testing.
+func (l *slogHandler) GetLevel() slog.Level {
+ return l.levelBias
+}
+
+func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
+ return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
+}
+
+func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
+ if l.slogSink != nil {
+ // Only adjust verbosity level of log entries < slog.LevelError.
+ if record.Level < slog.LevelError {
+ record.Level -= l.levelBias
+ }
+ return l.slogSink.Handle(ctx, record)
+ }
+
+ // No need to check for nil sink here because Handle will only be called
+ // when Enabled returned true.
+
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ return true
+ })
+ if record.Level >= slog.LevelError {
+ l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.sinkWithCallDepth().Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
+// are called by Handle, code in slog gets skipped.
+//
+// This offset currently (Go 1.21.0) works for calls through
+// slog.New(ToSlogHandler(...)). There's no guarantee that the call
+// chain won't change. Wrapping the handler will also break unwinding. It's
+// still better than not adjusting at all....
+//
+// This cannot be done when constructing the handler because FromSlogHandler needs
+// access to the original sink without this adjustment. A second copy would
+// work, but then WithAttrs would have to be called for both of them.
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+ if sink, ok := l.sink.(CallDepthLogSink); ok {
+ return sink.WithCallDepth(2)
+ }
+ return l.sink
+}
+
+func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ if l.sink == nil || len(attrs) == 0 {
+ return l
+ }
+
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithAttrs(attrs)
+ clone.sink = clone.slogSink
+ } else {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ }
+ clone.sink = l.sink.WithValues(kvList...)
+ }
+ return &clone
+}
+
+func (l *slogHandler) WithGroup(name string) slog.Handler {
+ if l.sink == nil {
+ return l
+ }
+ if name == "" {
+ // slog says to inline empty groups
+ return l
+ }
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithGroup(name)
+ clone.sink = clone.slogSink
+ } else {
+ clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+ }
+ return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ prefix := groupPrefix
+ if attr.Key != "" {
+ prefix = addPrefix(groupPrefix, attr.Key)
+ }
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, prefix, grpKVs)
+ }
+ kvList = append(kvList, grpKVs...)
+ } else if attr.Key != "" {
+ kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
+ }
+
+ return kvList
+}
+
+func addPrefix(prefix, name string) string {
+ if prefix == "" {
+ return name
+ }
+ if name == "" {
+ return prefix
+ }
+ return prefix + groupSeparator + name
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l *slogHandler) levelFromSlog(level slog.Level) int {
+ result := -level
+ result += l.levelBias // in case the original Logger had a V level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 00000000000..28a83d02439
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func FromSlogHandler(handler slog.Handler) Logger {
+ if handler, ok := handler.(*slogHandler); ok {
+ if handler.sink == nil {
+ return Discard()
+ }
+ return New(handler.sink).V(int(handler.levelBias))
+ }
+ return New(&slogSink{handler: handler})
+}
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned logger writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+// logger := <some Logger with 0 as verbosity level>
+// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+ if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+ return sink.handler
+ }
+
+ handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+ if slogSink, ok := handler.sink.(SlogSink); ok {
+ handler.slogSink = slogSink
+ }
+ return handler
+}
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+// as intended by slog
+// - proper grouping of key/value pairs via WithGroup
+// - verbosity levels > slog.LevelInfo can be recorded
+// - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) then are supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+ LogSink
+
+ Handle(ctx context.Context, record slog.Record) error
+ WithAttrs(attrs []slog.Attr) SlogSink
+ WithGroup(name string) SlogSink
+}
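For reviewers, the new FromSlogHandler/ToSlogHandler bridge added above can be exercised roughly as follows. This is an illustrative sketch, not part of the vendored code; it assumes Go 1.21+ and the logr version introduced by this bump:

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// Wrap a slog.Handler as a logr.Logger; logr V(4) corresponds to slog.LevelDebug.
	handler := slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug})
	logger := logr.FromSlogHandler(handler)
	logger.V(4).Info("debug-level message", "key", "value")

	// Expose the same logr.Logger through the slog API; records at or above
	// slog.LevelError end up in LogSink.Error, everything else in LogSink.Info.
	slogger := slog.New(logr.ToSlogHandler(logger))
	slogger.Warn("warning message", "key", "value")
}
```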
diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go
new file mode 100644
index 00000000000..4060fcbc2b0
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogsink.go
@@ -0,0 +1,120 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+ "runtime"
+ "time"
+)
+
+var (
+ _ LogSink = &slogSink{}
+ _ CallDepthLogSink = &slogSink{}
+ _ Underlier = &slogSink{}
+)
+
+// Underlier is implemented by the LogSink returned by NewFromLogHandler.
+type Underlier interface {
+ // GetUnderlying returns the Handler used by the LogSink.
+ GetUnderlying() slog.Handler
+}
+
+const (
+ // nameKey is used to log the `WithName` values as an additional attribute.
+ nameKey = "logger"
+
+ // errKey is used to log the error parameter of Error as an additional attribute.
+ errKey = "err"
+)
+
+type slogSink struct {
+ callDepth int
+ name string
+ handler slog.Handler
+}
+
+func (l *slogSink) Init(info RuntimeInfo) {
+ l.callDepth = info.CallDepth
+}
+
+func (l *slogSink) GetUnderlying() slog.Handler {
+ return l.handler
+}
+
+func (l *slogSink) WithCallDepth(depth int) LogSink {
+ newLogger := *l
+ newLogger.callDepth += depth
+ return &newLogger
+}
+
+func (l *slogSink) Enabled(level int) bool {
+ return l.handler.Enabled(context.Background(), slog.Level(-level))
+}
+
+func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
+ l.log(nil, msg, slog.Level(-level), kvList...)
+}
+
+func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
+ l.log(err, msg, slog.LevelError, kvList...)
+}
+
+func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
+ var pcs [1]uintptr
+ // skip runtime.Callers, this function, Info/Error, and all helper functions above that.
+ runtime.Callers(3+l.callDepth, pcs[:])
+
+ record := slog.NewRecord(time.Now(), level, msg, pcs[0])
+ if l.name != "" {
+ record.AddAttrs(slog.String(nameKey, l.name))
+ }
+ if err != nil {
+ record.AddAttrs(slog.Any(errKey, err))
+ }
+ record.Add(kvList...)
+ _ = l.handler.Handle(context.Background(), record)
+}
+
+func (l slogSink) WithName(name string) LogSink {
+ if l.name != "" {
+ l.name += "/"
+ }
+ l.name += name
+ return &l
+}
+
+func (l slogSink) WithValues(kvList ...interface{}) LogSink {
+ l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
+ return &l
+}
+
+func kvListToAttrs(kvList ...interface{}) []slog.Attr {
+ // We don't need the record itself, only its Add method.
+ record := slog.NewRecord(time.Time{}, 0, "", 0)
+ record.Add(kvList...)
+ attrs := make([]slog.Attr, 0, record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ attrs = append(attrs, attr)
+ return true
+ })
+ return attrs
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
index 6ad1c22bbe3..ff9c57e1d84 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
+++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
@@ -17,7 +17,7 @@ and corresponding updates for existing programs.
## Parsing and Validation Options
-Under the hood, a new `validator` struct takes care of validating the claims. A
+Under the hood, a new `Validator` struct takes care of validating the claims. A
long awaited feature has been the option to fine-tune the validation of tokens.
This is now possible with several `ParserOption` functions that can be appended
to most `Parse` functions, such as `ParseWithClaims`. The most important options
@@ -68,6 +68,16 @@ type Claims interface {
}
```
+Users that previously directly called the `Valid` function on their claims,
+e.g., to perform validation independently of parsing/verifying a token, can now
+use the `jwt.NewValidator` function to create a `Validator` independently of the
+`Parser`.
+
+```go
+var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second))
+v.Validate(myClaims)
+```
+
### Supported Claim Types and Removal of `StandardClaims`
The two standard claim types supported by this library, `MapClaims` and
@@ -169,7 +179,7 @@ be a drop-in replacement, if you're having troubles migrating, please open an
issue.
You can replace all occurrences of `github.com/dgrijalva/jwt-go` or
-`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually
+`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v5`, either manually
or by using tools such as `sed` or `gofmt`.
And then you'd typically run:
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
index 4ccae2a857d..ca85659ba4b 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
@@ -62,7 +62,7 @@ func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interf
case *ecdsa.PublicKey:
ecdsaKey = k
default:
- return ErrInvalidKeyType
+ return newError("ECDSA verify expects *ecsda.PublicKey", ErrInvalidKeyType)
}
if len(sig) != 2*m.KeySize {
@@ -96,7 +96,7 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte
case *ecdsa.PrivateKey:
ecdsaKey = k
default:
- return nil, ErrInvalidKeyType
+ return nil, newError("ECDSA sign expects *ecsda.PrivateKey", ErrInvalidKeyType)
}
// Create the hasher
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
index 3db00e4a233..c2138119e51 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
@@ -1,11 +1,10 @@
package jwt
import (
- "errors"
-
"crypto"
"crypto/ed25519"
"crypto/rand"
+ "errors"
)
var (
@@ -39,7 +38,7 @@ func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key inte
var ok bool
if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
- return ErrInvalidKeyType
+ return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType)
}
if len(ed25519Key) != ed25519.PublicKeySize {
@@ -61,7 +60,7 @@ func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]by
var ok bool
if ed25519Key, ok = key.(crypto.Signer); !ok {
- return nil, ErrInvalidKeyType
+ return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType)
}
if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
index 3afb04e648f..2ad542f00ca 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
@@ -22,7 +22,7 @@ func (je joinedError) Is(err error) bool {
// wrappedErrors is a workaround for wrapping multiple errors in environments
// where Go 1.20 is not available. It basically uses the already implemented
-// functionatlity of joinedError to handle multiple errors with supplies a
+// functionality of joinedError to handle multiple errors with supplies a
// custom error message that is identical to the one we produce in Go 1.20 using
// multiple %w directives.
type wrappedErrors struct {
diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
index 91b688ba9f1..96c62722d18 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/hmac.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
@@ -59,7 +59,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa
// Verify the key is the right type
keyBytes, ok := key.([]byte)
if !ok {
- return ErrInvalidKeyType
+ return newError("HMAC verify expects []byte", ErrInvalidKeyType)
}
// Can we use the specified hashing method?
@@ -91,7 +91,7 @@ func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interfa
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {
if keyBytes, ok := key.([]byte); ok {
if !m.Hash.Available() {
- return nil, ErrInvalidKeyType
+ return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
}
hasher := hmac.New(m.Hash.New, keyBytes)
diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go
index c93daa58495..685c2ea3065 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/none.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/none.go
@@ -32,7 +32,7 @@ func (m *signingMethodNone) Verify(signingString string, sig []byte, key interfa
return NoneSignatureTypeDisallowedError
}
// If signing method is none, signature must be an empty string
- if string(sig) != "" {
+ if len(sig) != 0 {
return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable)
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go
index f4386fbaace..ecf99af78f9 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/parser.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go
@@ -18,7 +18,7 @@ type Parser struct {
// Skip claims validation during token parsing.
skipClaimsValidation bool
- validator *validator
+ validator *Validator
decodeStrict bool
@@ -28,7 +28,7 @@ type Parser struct {
// NewParser creates a new Parser with the specified options
func NewParser(options ...ParserOption) *Parser {
p := &Parser{
- validator: &validator{},
+ validator: &Validator{},
}
// Loop through our parsing options and apply them
@@ -74,24 +74,40 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
}
}
- // Lookup key
- var key interface{}
+ // Decode signature
+ token.Signature, err = p.DecodeSegment(parts[2])
+ if err != nil {
+ return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
+ }
+ text := strings.Join(parts[0:2], ".")
+
+ // Lookup key(s)
if keyFunc == nil {
// keyFunc was not provided. short circuiting validation
return token, newError("no keyfunc was provided", ErrTokenUnverifiable)
}
- if key, err = keyFunc(token); err != nil {
- return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
- }
- // Decode signature
- token.Signature, err = p.DecodeSegment(parts[2])
+ got, err := keyFunc(token)
if err != nil {
- return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
+ return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
}
- // Perform signature validation
- if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+ switch have := got.(type) {
+ case VerificationKeySet:
+ if len(have.Keys) == 0 {
+ return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable)
+ }
+ // Iterate through keys and verify signature, skipping the rest when a match is found.
+ // Return the last error if no match is found.
+ for _, key := range have.Keys {
+ if err = token.Method.Verify(text, token.Signature, key); err == nil {
+ break
+ }
+ }
+ default:
+ err = token.Method.Verify(text, token.Signature, have)
+ }
+ if err != nil {
return token, newError("", ErrTokenSignatureInvalid, err)
}
@@ -99,7 +115,7 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
if !p.skipClaimsValidation {
// Make sure we have at least a default validator
if p.validator == nil {
- p.validator = newValidator()
+ p.validator = NewValidator()
}
if err := p.validator.Validate(claims); err != nil {
@@ -117,8 +133,8 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
//
// WARNING: Don't use this method unless you know what you're doing.
//
-// It's only ever useful in cases where you know the signature is valid (because it has
-// been checked previously in the stack) and you want to extract values from it.
+// It's only ever useful in cases where you know the signature is valid (since it has already
+// been or will be checked elsewhere in the stack) and you want to extract values from it.
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
parts = strings.Split(tokenString, ".")
if len(parts) != 3 {
@@ -130,9 +146,6 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
// parse Header
var headerBytes []byte
if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
- if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
- return token, parts, newError("tokenstring should not contain 'bearer '", ErrTokenMalformed)
- }
return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
}
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
@@ -140,23 +153,33 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
}
// parse Claims
- var claimBytes []byte
token.Claims = claims
- if claimBytes, err = p.DecodeSegment(parts[1]); err != nil {
+ claimBytes, err := p.DecodeSegment(parts[1])
+ if err != nil {
return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err)
}
- dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
- if p.useJSONNumber {
- dec.UseNumber()
- }
- // JSON Decode. Special case for map type to avoid weird pointer behavior
- if c, ok := token.Claims.(MapClaims); ok {
- err = dec.Decode(&c)
+
+ // If `useJSONNumber` is enabled then we must use *json.Decoder to decode
+ // the claims. However, this comes with a performance penalty so only use
+ // it if we must and, otherwise, simply use json.Unmarshal.
+ if !p.useJSONNumber {
+ // JSON Unmarshal. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = json.Unmarshal(claimBytes, &c)
+ } else {
+ err = json.Unmarshal(claimBytes, &claims)
+ }
} else {
- err = dec.Decode(&claims)
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ dec.UseNumber()
+ // JSON Decode. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
}
- // Handle decode error
if err != nil {
return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err)
}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
index 1b5af970f66..88a780fbd4a 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
@@ -58,6 +58,14 @@ func WithIssuedAt() ParserOption {
}
}
+// WithExpirationRequired returns the ParserOption to make exp claim required.
+// By default exp claim is optional.
+func WithExpirationRequired() ParserOption {
+ return func(p *Parser) {
+ p.validator.requireExp = true
+ }
+}
+
// WithAudience configures the validator to require the specified audience in
// the `aud` claim. Validation will fail if the audience is not listed in the
// token or the `aud` claim is missing.
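For reviewers, the new WithExpirationRequired parser option composes with the existing ones. A minimal sketch, not part of the vendored code; the secret and claims are placeholders:

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	secret := []byte("placeholder-secret")

	// A token deliberately missing the exp claim.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"}).SignedString(secret)
	if err != nil {
		panic(err)
	}

	parser := jwt.NewParser(
		jwt.WithValidMethods([]string{"HS256"}),
		jwt.WithExpirationRequired(), // exp is now mandatory, not just validated when present
	)
	_, err = parser.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	fmt.Println(err) // expected to report a missing required claim
}
```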
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
index daff094313d..83cbee6ae2b 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
@@ -51,7 +51,7 @@ func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interfac
var ok bool
if rsaKey, ok = key.(*rsa.PublicKey); !ok {
- return ErrInvalidKeyType
+ return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType)
}
// Create hasher
@@ -73,7 +73,7 @@ func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte,
// Validate type of key
if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
- return nil, ErrInvalidKey
+ return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType)
}
// Create the hasher
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
index 9599f0a46c0..28c386ec43a 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
@@ -88,7 +88,7 @@ func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key inter
case *rsa.PublicKey:
rsaKey = k
default:
- return ErrInvalidKey
+ return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType)
}
// Create hasher
@@ -115,7 +115,7 @@ func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byt
case *rsa.PrivateKey:
rsaKey = k
default:
- return nil, ErrInvalidKeyType
+ return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType)
}
// Create the hasher
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go
index c8ad7c7834d..352873a2d9c 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/token.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/token.go
@@ -1,6 +1,7 @@
package jwt
import (
+ "crypto"
"encoding/base64"
"encoding/json"
)
@@ -9,8 +10,21 @@ import (
// the key for verification. The function receives the parsed, but unverified
// Token. This allows you to use properties in the Header of the token (such as
// `kid`) to identify which key to use.
+//
+// The returned interface{} may be a single key or a VerificationKeySet containing
+// multiple keys.
type Keyfunc func(*Token) (interface{}, error)
+// VerificationKey represents a public or secret key for verifying a token's signature.
+type VerificationKey interface {
+ crypto.PublicKey | []uint8
+}
+
+// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token.
+type VerificationKeySet struct {
+ Keys []VerificationKey
+}
+
// Token represents a JWT Token. Different fields will be used depending on
// whether you're creating or parsing/verifying a token.
type Token struct {
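To illustrate the VerificationKeySet type added above (consumed by the parser change earlier in this diff), here is a sketch of a key-rotation style Keyfunc; the secrets are placeholders and not part of the vendored code:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	oldSecret := []byte("previous-signing-key") // placeholder
	newSecret := []byte("current-signing-key")  // placeholder

	claims := jwt.MapClaims{"sub": "demo", "exp": time.Now().Add(time.Hour).Unix()}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(oldSecret)
	if err != nil {
		panic(err)
	}

	// The Keyfunc returns a VerificationKeySet; the parser tries each key in
	// order and accepts the token as soon as one of them verifies the signature.
	token, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return jwt.VerificationKeySet{
			Keys: []jwt.VerificationKey{newSecret, oldSecret},
		}, nil
	})
	fmt.Println(token.Valid, err)
}
```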
diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go
index b82b38867d0..b2655a9e6d2 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/types.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/types.go
@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"math"
- "reflect"
"strconv"
"time"
)
@@ -121,14 +120,14 @@ func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
for _, vv := range v {
vs, ok := vv.(string)
if !ok {
- return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)}
+ return ErrInvalidType
}
aud = append(aud, vs)
}
case nil:
return nil
default:
- return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+ return ErrInvalidType
}
*s = aud
diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go
index 3850438939d..008ecd8712e 100644
--- a/vendor/github.com/golang-jwt/jwt/v5/validator.go
+++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go
@@ -28,13 +28,12 @@ type ClaimsValidator interface {
Validate() error
}
-// validator is the core of the new Validation API. It is automatically used by
+// Validator is the core of the new Validation API. It is automatically used by
// a [Parser] during parsing and can be modified with various parser options.
//
-// Note: This struct is intentionally not exported (yet) as we want to
-// internally finalize its API. In the future, we might make it publicly
-// available.
-type validator struct {
+// The [NewValidator] function should be used to create an instance of this
+// struct.
+type Validator struct {
// leeway is an optional leeway that can be provided to account for clock skew.
leeway time.Duration
@@ -42,6 +41,9 @@ type validator struct {
// validation. If unspecified, this defaults to time.Now.
timeFunc func() time.Time
+ // requireExp specifies whether the exp claim is required
+ requireExp bool
+
// verifyIat specifies whether the iat (Issued At) claim will be verified.
// According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
// only specifies the age of the token, but no validation check is
@@ -62,16 +64,28 @@ type validator struct {
expectedSub string
}
-// newValidator can be used to create a stand-alone validator with the supplied
+// NewValidator can be used to create a stand-alone validator with the supplied
// options. This validator can then be used to validate already parsed claims.
-func newValidator(opts ...ParserOption) *validator {
+//
+// Note: Under normal circumstances, explicitly creating a validator is not
+// needed and can potentially be dangerous; instead functions of the [Parser]
+// class should be used.
+//
+// The [Validator] is only checking the *validity* of the claims, such as its
+// expiration time, but it does NOT perform *signature verification* of the
+// token.
+func NewValidator(opts ...ParserOption) *Validator {
p := NewParser(opts...)
return p.validator
}
// Validate validates the given claims. It will also perform any custom
// validation if claims implements the [ClaimsValidator] interface.
-func (v *validator) Validate(claims Claims) error {
+//
+// Note: It will NOT perform any *signature verification* on the token that
+// contains the claims and expects that the [Claim] was already successfully
+// verified.
+func (v *Validator) Validate(claims Claims) error {
var (
now time.Time
errs []error = make([]error, 0, 6)
@@ -86,8 +100,9 @@ func (v *validator) Validate(claims Claims) error {
}
// We always need to check the expiration time, but usage of the claim
- // itself is OPTIONAL.
- if err = v.verifyExpiresAt(claims, now, false); err != nil {
+ // itself is OPTIONAL by default. requireExp overrides this behavior
+ // and makes the exp claim mandatory.
+ if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil {
errs = append(errs, err)
}
@@ -149,7 +164,7 @@ func (v *validator) Validate(claims Claims) error {
//
// Additionally, if any error occurs while retrieving the claim, e.g., when its
// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error {
+func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error {
exp, err := claims.GetExpirationTime()
if err != nil {
return err
@@ -170,7 +185,7 @@ func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool)
//
// Additionally, if any error occurs while retrieving the claim, e.g., when its
// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error {
+func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error {
iat, err := claims.GetIssuedAt()
if err != nil {
return err
@@ -191,7 +206,7 @@ func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool)
//
// Additionally, if any error occurs while retrieving the claim, e.g., when its
// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error {
+func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error {
nbf, err := claims.GetNotBefore()
if err != nil {
return err
@@ -211,7 +226,7 @@ func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool)
//
// Additionally, if any error occurs while retrieving the claim, e.g., when its
// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *validator) verifyAudience(claims Claims, cmp string, required bool) error {
+func (v *Validator) verifyAudience(claims Claims, cmp string, required bool) error {
aud, err := claims.GetAudience()
if err != nil {
return err
@@ -247,7 +262,7 @@ func (v *validator) verifyAudience(claims Claims, cmp string, required bool) err
//
// Additionally, if any error occurs while retrieving the claim, e.g., when its
// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error {
+func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error {
iss, err := claims.GetIssuer()
if err != nil {
return err
@@ -267,7 +282,7 @@ func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error
//
// Additionally, if any error occurs while retrieving the claim, e.g., when its
// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *validator) verifySubject(claims Claims, cmp string, required bool) error {
+func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error {
sub, err := claims.GetSubject()
if err != nil {
return err
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go
index c984f3c8f22..aba609deac5 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go
@@ -299,18 +299,29 @@ func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer,
// v1.Layer doesn't force consumers to care about whether the layer is compressed
// we should be fine returning the DockerLayer media type
mt := types.DockerLayer
- if bd, ok := i.imgDescriptor.LayerSources[h]; ok {
- // Overwrite the mediaType for foreign layers.
- return &foreignUncompressedLayer{
- uncompressedLayerFromTarball: uncompressedLayerFromTarball{
- diffID: diffID,
- mediaType: bd.MediaType,
- opener: i.opener,
- filePath: i.imgDescriptor.Layers[idx],
- },
- desc: bd,
- }, nil
+ bd, ok := i.imgDescriptor.LayerSources[h]
+ if ok {
+ // This is janky, but we don't want to implement Descriptor for
+ // uncompressed layers because it breaks a bunch of assumptions in partial.
+ // See https://github.com/google/go-containerregistry/issues/1870
+ docker25workaround := bd.MediaType == types.DockerUncompressedLayer || bd.MediaType == types.OCIUncompressedLayer
+
+ if !docker25workaround {
+ // Overwrite the mediaType for foreign layers.
+ return &foreignUncompressedLayer{
+ uncompressedLayerFromTarball: uncompressedLayerFromTarball{
+ diffID: diffID,
+ mediaType: bd.MediaType,
+ opener: i.opener,
+ filePath: i.imgDescriptor.Layers[idx],
+ },
+ desc: bd,
+ }, nil
+ }
+
+ // Intentional fall through.
}
+
return &uncompressedLayerFromTarball{
diffID: diffID,
mediaType: mt,
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
index 7ed347d3ad7..7ec5ac7ea90 100644
--- a/vendor/github.com/google/uuid/CHANGELOG.md
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -1,5 +1,25 @@
# Changelog
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
index b404f4bec27..dc60082d3b3 100644
--- a/vendor/github.com/google/uuid/hash.go
+++ b/vendor/github.com/google/uuid/hash.go
@@ -17,6 +17,12 @@ var (
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
+
+ // The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+ Max = UUID{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
)
// NewHash returns a new UUID derived from the hash of space concatenated with
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
index e6ef06cdc87..c351129279f 100644
--- a/vendor/github.com/google/uuid/time.go
+++ b/vendor/github.com/google/uuid/time.go
@@ -108,12 +108,23 @@ func setClockSequence(seq int) {
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. The time is only defined for version 1 and 2 UUIDs.
+// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
- time := int64(binary.BigEndian.Uint32(uuid[0:4]))
- time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
- time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
- return Time(time)
+ var t Time
+ switch uuid.Version() {
+ case 6:
+ time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+ t = Time(time)
+ case 7:
+ time := binary.BigEndian.Uint64(uuid[:8])
+ t = Time((time>>16)*10000 + g1582ns100)
+ default: // forward compatible
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ t = Time(time)
+ }
+ return t
}
// ClockSequence returns the clock sequence encoded in uuid.
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index dc75f7d9909..5232b486780 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -186,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
return uuid
}
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+ switch len(s) {
+ // Standard UUID format
+ case 36:
+
+ // UUID with "urn:uuid:" prefix
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
+ return fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // UUID enclosed in braces
+ case 36 + 2:
+ if s[0] != '{' || s[len(s)-1] != '}' {
+ return fmt.Errorf("invalid bracketed UUID format")
+ }
+ s = s[1 : len(s)-1]
+
+ // UUID without hyphens
+ case 32:
+ for i := 0; i < len(s); i += 2 {
+ _, ok := xtob(s[i], s[i+1])
+ if !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+
+ default:
+ return invalidLengthError{len(s)}
+ }
+
+ // Check for standard UUID format
+ if len(s) == 36 {
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return errors.New("invalid UUID format")
+ }
+ for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+ if _, ok := xtob(s[x], s[x+1]); !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+ }
+
+ return nil
+}
+
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
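A small sketch of the Validate helper added above; unlike Parse it only checks the textual format and does not construct a UUID value (illustrative only, not part of the vendored code):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	for _, s := range []string{
		"f47ac10b-58cc-0372-8567-0e02b2c3d479",
		"urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479",
		"{f47ac10b-58cc-0372-8567-0e02b2c3d479}",
		"not-a-uuid",
	} {
		if err := uuid.Validate(s); err != nil {
			fmt.Printf("%q rejected: %v\n", s, err)
			continue
		}
		fmt.Printf("%q is a valid UUID\n", s)
	}
}
```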
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 00000000000..339a959a7a2
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 sets the NodeID to random bits automatically. If the clock
+// sequence has not been set by SetClockSequence then it will be set
+// automatically. If GetTime fails to return the current time, NewV6 returns
+// Nil and an error.
+func NewV6() (UUID, error) {
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_mid | time_low_and_version |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |clk_seq_hi_res | clk_seq_low | node (0-1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | node (2-5) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ uuid[6] = 0x60 | (uuid[6] & 0x0F)
+ uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
+
+ return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 00000000000..3167b643d45
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded,
+// as well as improved entropy characteristics over versions 1 and 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+ uuid, err := NewRandom()
+ if err != nil {
+ return uuid, err
+ }
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+ uuid, err := NewRandomFromReader(r)
+ if err != nil {
+ return uuid, err
+ }
+
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]).
+// uuid[8] already has the right variant bits set (10).
+// See NewV7 and NewV7FromReader.
+func makeV7(uuid []byte) {
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms | ver | rand_a (12 bit seq) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |var| rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ _ = uuid[15] // bounds check
+
+ t, s := getV7Time()
+
+ uuid[0] = byte(t >> 40)
+ uuid[1] = byte(t >> 32)
+ uuid[2] = byte(t >> 24)
+ uuid[3] = byte(t >> 16)
+ uuid[4] = byte(t >> 8)
+ uuid[5] = byte(t)
+
+ uuid[6] = 0x70 | (0x0F & byte(s>>8))
+ uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned stored as:
+//
+// 52 bits of time in milliseconds since epoch
+// 12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+ timeMu.Lock()
+ defer timeMu.Unlock()
+
+ nano := timeNow().UnixNano()
+ milli = nano / nanoPerMilli
+ // Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+ seq = (nano - milli*nanoPerMilli) >> 8
+ now := milli<<12 + seq
+ if now <= lastV7time {
+ now = lastV7time + 1
+ milli = now >> 12
+ seq = now & 0xfff
+ }
+ lastV7time = now
+ return milli, seq
+}
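Putting the new NewV7 constructor together with the extended Time() support from the time.go hunk above, a minimal sketch (illustrative only, not part of the vendored code):

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	fmt.Println(id, "version:", id.Version())

	// Time() now also understands v6 and v7 UUIDs, not just v1/v2.
	sec, nsec := id.Time().UnixTime()
	fmt.Println("embedded timestamp:", time.Unix(sec, nsec).UTC())
}
```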
diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
index ef508417b39..34107104e97 100644
--- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
+++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- "v2": "2.12.0"
+ "v2": "2.12.1"
}
diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
index ae711494702..e16ab6033e2 100644
--- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
+++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
@@ -1,5 +1,12 @@
# Changelog
+## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13)
+
+
+### Bug Fixes
+
+* add XGoogFieldMaskHeader constant ([#321](https://github.com/googleapis/gax-go/issues/321)) ([666ee08](https://github.com/googleapis/gax-go/commit/666ee08931041b7fed56bed7132649785b2d3dfe))
+
## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26)
diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go
index af15fb5827d..9aab3d91f2d 100644
--- a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go
+++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go
@@ -38,6 +38,14 @@ import (
)
const (
+ // XGoogFieldMaskHeader is the canonical header key for the [System Parameter]
+ // that specifies the response read mask. The value(s) for this header
+ // must adhere to the format described in [fieldmaskpb].
+ //
+ // [System Parameter]: https://cloud.google.com/apis/docs/system-parameters
+ // [fieldmaskpb]: https://google.golang.org/protobuf/types/known/fieldmaskpb
+ XGoogFieldMaskHeader = "x-goog-fieldmask"
+
headerKey = contextKey("header")
)
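For context, the new XGoogFieldMaskHeader constant is intended to be used with this package's header helpers; a sketch, where the field mask value and the downstream client call are placeholders:

```go
package main

import (
	"context"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	// Ask the service to return only the listed fields in its response.
	ctx := callctx.SetHeaders(context.Background(),
		callctx.XGoogFieldMaskHeader, "name,create_time", // placeholder field mask
	)
	_ = ctx // ctx would then be passed to a generated Google Cloud client call
}
```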
diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go
index 453fab7ecca..3e53729e5fc 100644
--- a/vendor/github.com/googleapis/gax-go/v2/header.go
+++ b/vendor/github.com/googleapis/gax-go/v2/header.go
@@ -103,7 +103,9 @@ func goVersion() string {
return "UNKNOWN"
}
-// XGoogHeader is for use by the Google Cloud Libraries only.
+// XGoogHeader is for use by the Google Cloud Libraries only. See package
+// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving
+// request/response headers.
//
// XGoogHeader formats key-value pairs.
// The resulting string is suitable for x-goog-api-client header.
@@ -125,7 +127,8 @@ func XGoogHeader(keyval ...string) string {
}
// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries
-// only.
+// only. See package [github.com/googleapis/gax-go/v2/callctx] for help
+// setting/retrieving request/response headers.
//
// InsertMetadataIntoOutgoingContext returns a new context that merges the
// provided keyvals metadata pairs with any existing metadata/headers in the
@@ -137,7 +140,9 @@ func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) c
return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...))
}
-// BuildHeaders is for use by the Google Cloud Libraries only.
+// BuildHeaders is for use by the Google Cloud Libraries only. See package
+// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving
+// request/response headers.
//
// BuildHeaders returns a new http.Header that merges the provided
// keyvals header pairs with any existing metadata/headers in the provided
diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go
index 7425b5ffbb0..890d4819e98 100644
--- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go
+++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go
@@ -30,4 +30,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "2.12.0"
+const Version = "2.12.1"
diff --git a/vendor/github.com/hashicorp/vault/api/.copywrite.hcl b/vendor/github.com/hashicorp/vault/api/.copywrite.hcl
new file mode 100644
index 00000000000..c4b09f33640
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/.copywrite.hcl
@@ -0,0 +1,8 @@
+schema_version = 1
+
+project {
+ license = "MPL-2.0"
+ copyright_year = 2024
+
+ header_ignore = []
+}
diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go
index 1ba9da48eae..52c991b1e2f 100644
--- a/vendor/github.com/hashicorp/vault/api/client.go
+++ b/vendor/github.com/hashicorp/vault/api/client.go
@@ -82,6 +82,8 @@ const (
const (
EnvVaultAgentAddress = "VAULT_AGENT_ADDR"
EnvVaultInsecure = "VAULT_SKIP_VERIFY"
+
+ DefaultAddress = "https://127.0.0.1:8200"
)
// WrappingLookupFunc is a function that, given an HTTP verb and a path,
@@ -248,7 +250,7 @@ type TLSConfig struct {
// If an error is encountered, the Error field on the returned *Config will be populated with the specific error.
func DefaultConfig() *Config {
config := &Config{
- Address: "https://127.0.0.1:8200",
+ Address: DefaultAddress,
HttpClient: cleanhttp.DefaultPooledClient(),
Timeout: time.Second * 60,
MinRetryWait: time.Millisecond * 1000,
@@ -528,6 +530,7 @@ func (c *Config) ParseAddress(address string) (*url.URL, error) {
return nil, err
}
+ previousAddress := c.Address
c.Address = address
if strings.HasPrefix(address, "unix://") {
@@ -550,7 +553,7 @@ func (c *Config) ParseAddress(address string) (*url.URL, error) {
} else {
return nil, fmt.Errorf("attempting to specify unix:// address with non-transport transport")
}
- } else if strings.HasPrefix(c.Address, "unix://") {
+ } else if strings.HasPrefix(previousAddress, "unix://") {
// When the address being set does not begin with unix:// but the previous
// address in the Config did, change the transport's DialContext back to
// use the default configuration that cleanhttp uses.
@@ -589,6 +592,7 @@ type Client struct {
requestCallbacks []RequestCallback
responseCallbacks []ResponseCallback
replicationStateStore *replicationStateStore
+ hcpCookie *http.Cookie
}
// NewClient returns a new client for the given configuration.
@@ -1025,6 +1029,33 @@ func (c *Client) SetToken(v string) {
c.token = v
}
+// HCPCookie returns the HCP cookie being used by this client. It will
+// return an empty cookie when no cookie is set.
+func (c *Client) HCPCookie() string {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+
+ if c.hcpCookie == nil {
+ return ""
+ }
+ return c.hcpCookie.String()
+}
+
+// SetHCPCookie sets the HCP cookie directly. This won't perform any auth
+// verification; it simply sets the cookie for use on future requests.
+func (c *Client) SetHCPCookie(v *http.Cookie) error {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
+ if err := v.Valid(); err != nil {
+ return err
+ }
+
+ c.hcpCookie = v
+
+ return nil
+}
+
// ClearToken deletes the token if it is set or does nothing otherwise.
func (c *Client) ClearToken() {
c.modifyLock.Lock()
@@ -1299,6 +1330,8 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
Params: make(map[string][]string),
}
+ req.HCPCookie = c.hcpCookie
+
var lookupPath string
switch {
case strings.HasPrefix(requestPath, "/v1/"):
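A sketch of the new SetHCPCookie/HCPCookie accessors added above; the cookie name and value are placeholders, and whether a given Vault/HCP deployment expects this cookie is outside the scope of this diff:

```go
package main

import (
	"log"
	"net/http"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	cookie := &http.Cookie{
		Name:  "hcp_access_token",  // placeholder name
		Value: "placeholder-value", // placeholder value
	}
	// SetHCPCookie validates the cookie and stores it for use on future requests.
	if err := client.SetHCPCookie(cookie); err != nil {
		log.Fatal(err)
	}
	log.Println("cookie set:", client.HCPCookie())
}
```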
diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go
index 927dd168e44..068e9068f38 100644
--- a/vendor/github.com/hashicorp/vault/api/logical.go
+++ b/vendor/github.com/hashicorp/vault/api/logical.go
@@ -212,6 +212,17 @@ func (c *Logical) WriteWithContext(ctx context.Context, path string, data map[st
return c.write(ctx, path, r)
}
+func (c *Logical) WriteRaw(path string, data []byte) (*Response, error) {
+ return c.WriteRawWithContext(context.Background(), path, data)
+}
+
+func (c *Logical) WriteRawWithContext(ctx context.Context, path string, data []byte) (*Response, error) {
+ r := c.c.NewRequest(http.MethodPut, "/v1/"+path)
+ r.BodyBytes = data
+
+ return c.writeRaw(ctx, r)
+}
+
func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) {
r := c.c.NewRequest(http.MethodPatch, "/v1/"+path)
r.Headers.Set("Content-Type", "application/merge-patch+json")
@@ -261,6 +272,14 @@ func (c *Logical) write(ctx context.Context, path string, request *Request) (*Se
return ParseSecret(resp.Body)
}
+func (c *Logical) writeRaw(ctx context.Context, request *Request) (*Response, error) {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ resp, err := c.c.rawRequestWithContext(ctx, request)
+ return resp, err
+}
+
func (c *Logical) Delete(path string) (*Secret, error) {
return c.DeleteWithContext(context.Background(), path)
}
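For reference, a short usage sketch of the new WriteRaw helper (the path and payload are made up for illustration). Unlike Write, it sends the bytes unchanged and returns the raw *api.Response rather than a parsed Secret:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical KV v2 path; WriteRaw performs a PUT with the body as-is,
	// so the caller is responsible for producing valid JSON.
	body := []byte(`{"data":{"password":"example"}}`)
	resp, err := client.Logical().WriteRaw("secret/data/demo", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.StatusCode)
}
```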
diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go
index a8d23252977..3705c7310a8 100644
--- a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go
+++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go
@@ -51,6 +51,7 @@ type PluginAPIClientMeta struct {
flagCAPath string
flagClientCert string
flagClientKey string
+ flagServerName string
flagInsecure bool
}
@@ -62,6 +63,7 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet {
fs.StringVar(&f.flagCAPath, "ca-path", "", "")
fs.StringVar(&f.flagClientCert, "client-cert", "", "")
fs.StringVar(&f.flagClientKey, "client-key", "", "")
+ fs.StringVar(&f.flagServerName, "tls-server-name", "", "")
fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "")
return fs
@@ -70,13 +72,13 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet {
// GetTLSConfig will return a TLSConfig based off the values from the flags
func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig {
// If we need custom TLS configuration, then set it
- if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure {
+ if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure || f.flagServerName != "" {
t := &TLSConfig{
CACert: f.flagCACert,
CAPath: f.flagCAPath,
ClientCert: f.flagClientCert,
ClientKey: f.flagClientKey,
- TLSServerName: "",
+ TLSServerName: f.flagServerName,
Insecure: f.flagInsecure,
}
diff --git a/vendor/github.com/hashicorp/vault/api/plugin_types.go b/vendor/github.com/hashicorp/vault/api/plugin_types.go
index 4c759a2decc..c8f69ae404f 100644
--- a/vendor/github.com/hashicorp/vault/api/plugin_types.go
+++ b/vendor/github.com/hashicorp/vault/api/plugin_types.go
@@ -7,7 +7,10 @@ package api
// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types.go
// Any changes made should be made to both files at the same time.
-import "fmt"
+import (
+ "encoding/json"
+ "fmt"
+)
var PluginTypes = []PluginType{
PluginTypeUnknown,
@@ -64,3 +67,34 @@ func ParsePluginType(pluginType string) (PluginType, error) {
return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType)
}
}
+
+// UnmarshalJSON implements json.Unmarshaler. It supports unmarshaling either a
+// string or a uint32. All new serialization will be as a string, but we
+// previously serialized as a uint32 so we need to support that for backwards
+// compatibility.
+func (p *PluginType) UnmarshalJSON(data []byte) error {
+ var asString string
+ err := json.Unmarshal(data, &asString)
+ if err == nil {
+ *p, err = ParsePluginType(asString)
+ return err
+ }
+
+ var asUint32 uint32
+ err = json.Unmarshal(data, &asUint32)
+ if err != nil {
+ return err
+ }
+ *p = PluginType(asUint32)
+ switch *p {
+ case PluginTypeUnknown, PluginTypeCredential, PluginTypeDatabase, PluginTypeSecrets:
+ return nil
+ default:
+ return fmt.Errorf("%d is not a supported plugin type", asUint32)
+ }
+}
+
+// MarshalJSON implements json.Marshaler.
+func (p PluginType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(p.String())
+}
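A small sketch of the backwards-compatible decoding described above. It assumes the PluginType constants keep their current iota ordering (so the legacy numeric value 1 still maps to the credential/auth type); marshaling always emits the string form going forward:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	var fromString, fromNumber vault.PluginType

	// New serialization: a string such as "auth" or "database".
	if err := json.Unmarshal([]byte(`"database"`), &fromString); err != nil {
		log.Fatal(err)
	}

	// Legacy serialization: the old uint32 enum value (1 is assumed to still
	// correspond to the credential/auth plugin type).
	if err := json.Unmarshal([]byte(`1`), &fromNumber); err != nil {
		log.Fatal(err)
	}

	out, _ := json.Marshal(fromNumber)
	fmt.Println(fromString, fromNumber, string(out))
}
```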
diff --git a/vendor/github.com/hashicorp/vault/api/replication_status.go b/vendor/github.com/hashicorp/vault/api/replication_status.go
index 1668daf19c1..9bc02d53935 100644
--- a/vendor/github.com/hashicorp/vault/api/replication_status.go
+++ b/vendor/github.com/hashicorp/vault/api/replication_status.go
@@ -19,11 +19,13 @@ const (
)
type ClusterInfo struct {
- APIAddr string `json:"api_address,omitempty" mapstructure:"api_address"`
- ClusterAddress string `json:"cluster_address,omitempty" mapstructure:"cluster_address"`
- ConnectionStatus string `json:"connection_status,omitempty" mapstructure:"connection_status"`
- LastHeartBeat string `json:"last_heartbeat,omitempty" mapstructure:"last_heartbeat"`
- NodeID string `json:"node_id,omitempty" mapstructure:"node_id"`
+ APIAddr string `json:"api_address,omitempty" mapstructure:"api_address"`
+ ClusterAddress string `json:"cluster_address,omitempty" mapstructure:"cluster_address"`
+ ConnectionStatus string `json:"connection_status,omitempty" mapstructure:"connection_status"`
+ LastHeartBeat string `json:"last_heartbeat,omitempty" mapstructure:"last_heartbeat"`
+ LastHeartBeatDurationMillis string `json:"last_heartbeat_duration_ms,omitempty" mapstructure:"last_heartbeat_duration_ms"`
+ ClockSkewMillis string `json:"clock_skew_ms,omitempty" mapstructure:"clock_skew_ms"`
+ NodeID string `json:"node_id,omitempty" mapstructure:"node_id"`
}
type ReplicationStatusGenericResponse struct {
diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go
index ecf783701ad..a2d912c64dc 100644
--- a/vendor/github.com/hashicorp/vault/api/request.go
+++ b/vendor/github.com/hashicorp/vault/api/request.go
@@ -39,6 +39,9 @@ type Request struct {
// EGPs). If set, the override flag will take effect for all policies
// evaluated during the request.
PolicyOverride bool
+
+ // HCPCookie is used to set a http cookie when client is connected to HCP
+ HCPCookie *http.Cookie
}
// SetJSONBody is used to set a request body that is a JSON-encoded value.
@@ -145,5 +148,9 @@ func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) {
req.Header.Set("X-Vault-Policy-Override", "true")
}
+ if r.HCPCookie != nil {
+ req.AddCookie(r.HCPCookie)
+ }
+
return req, nil
}
diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go
index 3d15f7a806a..d37bf3cf06b 100644
--- a/vendor/github.com/hashicorp/vault/api/secret.go
+++ b/vendor/github.com/hashicorp/vault/api/secret.go
@@ -42,6 +42,10 @@ type Secret struct {
// cubbyhole of the given token (which has a TTL of the given number of
// seconds)
WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"`
+
+ // MountType, if non-empty, provides some information about what kind
+ // of mount this secret came from.
+ MountType string `json:"mount_type,omitempty"`
}
// TokenID returns the standardized token ID (token) for the given secret.
diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
index 6310d42fcf4..d57b7571175 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
@@ -78,3 +78,56 @@ func (c *Sys) CapabilitiesWithContext(ctx context.Context, token, path string) (
return res, nil
}
+
+func (c *Sys) CapabilitiesAccessor(accessor, path string) ([]string, error) {
+ return c.CapabilitiesAccessorWithContext(context.Background(), accessor, path)
+}
+
+func (c *Sys) CapabilitiesAccessorWithContext(ctx context.Context, accessor, path string) ([]string, error) {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ body := map[string]string{
+ "accessor": accessor,
+ "path": path,
+ }
+
+ reqPath := "/v1/sys/capabilities-accessor"
+
+ r := c.c.NewRequest(http.MethodPost, reqPath)
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ secret, err := ParseSecret(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ if secret == nil || secret.Data == nil {
+ return nil, errors.New("data from server response is empty")
+ }
+
+ var res []string
+ err = mapstructure.Decode(secret.Data[path], &res)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(res) == 0 {
+ _, ok := secret.Data["capabilities"]
+ if ok {
+ err = mapstructure.Decode(secret.Data["capabilities"], &res)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return res, nil
+}
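As a usage sketch (the accessor and path values are placeholders), the new accessor-based capability lookup mirrors the existing token-based Capabilities call:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder token accessor; in practice it comes from a token lookup.
	caps, err := client.Sys().CapabilitiesAccessor("hmac-accessor-example", "secret/data/demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("capabilities:", caps) // e.g. [read list]
}
```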
diff --git a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go
index 2b2aa7c3e98..58a73b89cbb 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go
@@ -35,12 +35,14 @@ type HAStatusResponse struct {
}
type HANode struct {
- Hostname string `json:"hostname"`
- APIAddress string `json:"api_address"`
- ClusterAddress string `json:"cluster_address"`
- ActiveNode bool `json:"active_node"`
- LastEcho *time.Time `json:"last_echo"`
- Version string `json:"version"`
- UpgradeVersion string `json:"upgrade_version,omitempty"`
- RedundancyZone string `json:"redundancy_zone,omitempty"`
+ Hostname string `json:"hostname"`
+ APIAddress string `json:"api_address"`
+ ClusterAddress string `json:"cluster_address"`
+ ActiveNode bool `json:"active_node"`
+ LastEcho *time.Time `json:"last_echo"`
+ EchoDurationMillis int64 `json:"echo_duration_ms"`
+ ClockSkewMillis int64 `json:"clock_skew_ms"`
+ Version string `json:"version"`
+ UpgradeVersion string `json:"upgrade_version,omitempty"`
+ RedundancyZone string `json:"redundancy_zone,omitempty"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go
index 13fd8d4d374..0dc849885ff 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_health.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_health.go
@@ -49,4 +49,7 @@ type HealthResponse struct {
ClusterName string `json:"cluster_name,omitempty"`
ClusterID string `json:"cluster_id,omitempty"`
LastWAL uint64 `json:"last_wal,omitempty"`
+ Enterprise bool `json:"enterprise"`
+ EchoDurationMillis int64 `json:"echo_duration_ms"`
+ ClockSkewMillis int64 `json:"clock_skew_ms"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
index a6c2a0f5412..b9f4f8f6f83 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
@@ -271,6 +271,9 @@ type MountConfigInput struct {
AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"`
PluginVersion string `json:"plugin_version,omitempty"`
UserLockoutConfig *UserLockoutConfigInput `json:"user_lockout_config,omitempty"`
+ DelegatedAuthAccessors []string `json:"delegated_auth_accessors,omitempty" mapstructure:"delegated_auth_accessors"`
+ IdentityTokenKey string `json:"identity_token_key,omitempty" mapstructure:"identity_token_key"`
+
// Deprecated: This field will always be blank for newer server responses.
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
@@ -303,6 +306,9 @@ type MountConfigOutput struct {
TokenType string `json:"token_type,omitempty" mapstructure:"token_type"`
AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"`
UserLockoutConfig *UserLockoutConfigOutput `json:"user_lockout_config,omitempty"`
+ DelegatedAuthAccessors []string `json:"delegated_auth_accessors,omitempty" mapstructure:"delegated_auth_accessors"`
+ IdentityTokenKey string `json:"identity_token_key,omitempty" mapstructure:"identity_token_key"`
+
// Deprecated: This field will always be blank for newer server responses.
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go
index 68320d2d8a2..9d424d009ec 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_plugins.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go
@@ -36,6 +36,8 @@ type ListPluginsResponse struct {
type PluginDetails struct {
Type string `json:"type"`
Name string `json:"name"`
+ OCIImage string `json:"oci_image,omitempty" mapstructure:"oci_image"`
+ Runtime string `json:"runtime,omitempty"`
Version string `json:"version,omitempty"`
Builtin bool `json:"builtin"`
DeprecationStatus string `json:"deprecation_status,omitempty" mapstructure:"deprecation_status"`
@@ -144,9 +146,10 @@ type GetPluginResponse struct {
Args []string `json:"args"`
Builtin bool `json:"builtin"`
Command string `json:"command"`
- OCIImage string `json:"oci_image"`
Name string `json:"name"`
SHA256 string `json:"sha256"`
+ OCIImage string `json:"oci_image,omitempty"`
+ Runtime string `json:"runtime,omitempty"`
DeprecationStatus string `json:"deprecation_status,omitempty"`
Version string `json:"version,omitempty"`
}
@@ -206,6 +209,9 @@ type RegisterPluginInput struct {
// OCIImage specifies the container image to run as a plugin.
OCIImage string `json:"oci_image,omitempty"`
+ // Runtime is the Vault plugin runtime to use when running the plugin.
+ Runtime string `json:"runtime,omitempty"`
+
// Env specifies a list of key=value pairs to add to the plugin's environment
// variables.
Env []string `json:"env,omitempty"`
@@ -268,6 +274,22 @@ func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPlug
return err
}
+// RootReloadPluginInput is used as input to the RootReloadPlugin function.
+type RootReloadPluginInput struct {
+ Plugin string `json:"-"` // Plugin name, as registered in the plugin catalog.
+ Type PluginType `json:"-"` // Plugin type: auth, secret, or database.
+ Scope string `json:"scope,omitempty"` // Empty to reload on current node, "global" for all nodes.
+}
+
+// RootReloadPlugin reloads plugins, possibly returning reloadID for a global
+// scoped reload. This is only available in the root namespace, and reloads
+// plugins across all namespaces, whereas ReloadPlugin is available in all
+// namespaces but only reloads plugins in use in the request's namespace.
+func (c *Sys) RootReloadPlugin(ctx context.Context, i *RootReloadPluginInput) (string, error) {
+ path := fmt.Sprintf("/v1/sys/plugins/reload/%s/%s", i.Type.String(), i.Plugin)
+ return c.reloadPluginInternal(ctx, path, i, i.Scope == "global")
+}
+
// ReloadPluginInput is used as input to the ReloadPlugin function.
type ReloadPluginInput struct {
// Plugin is the name of the plugin to reload, as registered in the plugin catalog
@@ -286,15 +308,20 @@ func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) {
}
// ReloadPluginWithContext reloads mounted plugin backends, possibly returning
-// reloadId for a cluster scoped reload
+// reloadID for a cluster scoped reload. It is limited to reloading plugins that
+// are in use in the request's namespace. See RootReloadPlugin for an API that
+// can reload plugins across all namespaces.
func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) (string, error) {
+ return c.reloadPluginInternal(ctx, "/v1/sys/plugins/reload/backend", i, i.Scope == "global")
+}
+
+func (c *Sys) reloadPluginInternal(ctx context.Context, path string, body any, global bool) (string, error) {
ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
defer cancelFunc()
- path := "/v1/sys/plugins/reload/backend"
req := c.c.NewRequest(http.MethodPut, path)
- if err := req.SetJSONBody(i); err != nil {
+ if err := req.SetJSONBody(body); err != nil {
return "", err
}
@@ -304,7 +331,7 @@ func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput)
}
defer resp.Body.Close()
- if i.Scope == "global" {
+ if global {
// Get the reload id
secret, parseErr := ParseSecret(resp.Body)
if parseErr != nil {
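An illustrative sketch of the new root-namespace reload entry point (the plugin name is hypothetical). A non-empty reload ID is only returned when the scope is "global":

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Reload a (hypothetical) secrets plugin across every node in the cluster.
	reloadID, err := client.Sys().RootReloadPlugin(context.Background(), &vault.RootReloadPluginInput{
		Plugin: "my-secrets-plugin",
		Type:   vault.PluginTypeSecrets,
		Scope:  "global",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reload id:", reloadID)
}
```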
diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins_runtimes.go b/vendor/github.com/hashicorp/vault/api/sys_plugins_runtimes.go
index c3380a85d1b..b56a899f650 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_plugins_runtimes.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_plugins_runtimes.go
@@ -64,8 +64,9 @@ type RegisterPluginRuntimeInput struct {
OCIRuntime string `json:"oci_runtime,omitempty"`
CgroupParent string `json:"cgroup_parent,omitempty"`
- CPU int64 `json:"cpu,omitempty"`
- Memory int64 `json:"memory,omitempty"`
+ CPU int64 `json:"cpu_nanos,omitempty"`
+ Memory int64 `json:"memory_bytes,omitempty"`
+ Rootless bool `json:"rootless,omitempty"`
}
// RegisterPluginRuntime registers the plugin with the given information.
diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go
index 7a9c5621ed1..62002496c36 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_seal.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go
@@ -109,6 +109,7 @@ type SealStatusResponse struct {
ClusterName string `json:"cluster_name,omitempty"`
ClusterID string `json:"cluster_id,omitempty"`
RecoverySeal bool `json:"recovery_seal"`
+ RecoverySealType string `json:"recovery_seal_type,omitempty"`
StorageType string `json:"storage_type,omitempty"`
HCPLinkStatus string `json:"hcp_link_status,omitempty"`
HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"`
diff --git a/vendor/github.com/hashicorp/vault/api/sys_ui_custom_message.go b/vendor/github.com/hashicorp/vault/api/sys_ui_custom_message.go
new file mode 100644
index 00000000000..a129efea763
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_ui_custom_message.go
@@ -0,0 +1,281 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+)
+
+const (
+ // baseEndpoint is the common base URL path for all endpoints used in this
+ // module.
+ baseEndpoint string = "/v1/sys/config/ui/custom-messages"
+)
+
+// ListUICustomMessages calls ListUICustomMessagesWithContext using a background
+// Context.
+func (c *Sys) ListUICustomMessages(req UICustomMessageListRequest) (*Secret, error) {
+ return c.ListUICustomMessagesWithContext(context.Background(), req)
+}
+
+// ListUICustomMessagesWithContext sends a request to the List custom messages
+// endpoint using the provided Context and UICustomMessageListRequest value as
+// the inputs. It returns a pointer to a Secret if a response was obtained from
+// the server, including error responses; or an error if a response could not be
+// obtained due to an error.
+func (c *Sys) ListUICustomMessagesWithContext(ctx context.Context, req UICustomMessageListRequest) (*Secret, error) {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ r := c.c.NewRequest("LIST", fmt.Sprintf("%s/", baseEndpoint))
+ if req.Active != nil {
+ r.Params.Add("active", strconv.FormatBool(*req.Active))
+ }
+ if req.Authenticated != nil {
+ r.Params.Add("authenticated", strconv.FormatBool(*req.Authenticated))
+ }
+ if req.Type != nil {
+ r.Params.Add("type", *req.Type)
+ }
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ secret, err := ParseSecret(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ if secret == nil || secret.Data == nil {
+ return nil, errors.New("data from server response is empty")
+ }
+
+ return secret, nil
+}
+
+// CreateUICustomMessage calls CreateUICustomMessageWithContext using a
+// background Context.
+func (c *Sys) CreateUICustomMessage(req UICustomMessageRequest) (*Secret, error) {
+ return c.CreateUICustomMessageWithContext(context.Background(), req)
+}
+
+// CreateUICustomMessageWithContext sends a request to the Create custom
+// messages endpoint using the provided Context and UICustomMessageRequest
+// values as the inputs. It returns a pointer to a Secret if a response was
+// obtained from the server, including error responses; or an error if a
+// response could not be obtained due to an error.
+func (c *Sys) CreateUICustomMessageWithContext(ctx context.Context, req UICustomMessageRequest) (*Secret, error) {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ r := c.c.NewRequest(http.MethodPost, baseEndpoint)
+ if err := r.SetJSONBody(&req); err != nil {
+ return nil, fmt.Errorf("error encoding request body to json: %w", err)
+ }
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if err != nil {
+ return nil, fmt.Errorf("error sending request to server: %w", err)
+ }
+ defer resp.Body.Close()
+
+ secret, err := ParseSecret(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse secret from server response: %w", err)
+ }
+
+ if secret == nil || secret.Data == nil {
+ return nil, errors.New("data from server response is empty")
+ }
+
+ return secret, nil
+}
+
+// ReadUICustomMessage calls ReadUICustomMessageWithContext using a background
+// Context.
+func (c *Sys) ReadUICustomMessage(id string) (*Secret, error) {
+ return c.ReadUICustomMessageWithContext(context.Background(), id)
+}
+
+// ReadUICustomMessageWithContext sends a request to the Read custom message
+// endpoint using the provided Context and id values. It returns a pointer to a
+// Secret if a response was obtained from the server, including error responses;
+// or an error if a response could not be obtained due to an error.
+func (c *Sys) ReadUICustomMessageWithContext(ctx context.Context, id string) (*Secret, error) {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", baseEndpoint, id))
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if err != nil {
+ return nil, fmt.Errorf("error sending request to server: %w", err)
+ }
+ defer resp.Body.Close()
+
+ secret, err := ParseSecret(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse secret from server response: %w", err)
+ }
+
+ if secret == nil || secret.Data == nil {
+ return nil, errors.New("data from server response is empty")
+ }
+
+ return secret, nil
+}
+
+// UpdateUICustomMessage calls UpdateUICustomMessageWithContext using a
+// background Context.
+func (c *Sys) UpdateUICustomMessage(id string, req UICustomMessageRequest) error {
+ return c.UpdateUICustomMessageWithContext(context.Background(), id, req)
+}
+
+// UpdateUICustomMessageWithContext sends a request to the Update custom message
+// endpoint using the provided Context, id, and UICustomMessageRequest values.
+// It returns nil if the update succeeded, or an error if the request could not
+// be completed.
+func (c *Sys) UpdateUICustomMessageWithContext(ctx context.Context, id string, req UICustomMessageRequest) error {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("%s/%s", baseEndpoint, id))
+ if err := r.SetJSONBody(&req); err != nil {
+ return fmt.Errorf("error encoding request body to json: %w", err)
+ }
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if err != nil {
+ return fmt.Errorf("error sending request to server: %w", err)
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+// DeleteUICustomMessage calls DeleteUICustomMessageWithContext using a
+// background Context.
+func (c *Sys) DeleteUICustomMessage(id string) error {
+	return c.DeleteUICustomMessageWithContext(context.Background(), id)
+}
+
+// DeleteUICustomMessageWithContext sends a request to the Delete custom message
+// endpoint using the provided Context and id values. It returns nil if the
+// deletion succeeded, or an error if the request could not be completed.
+func (c *Sys) DeleteUICustomMessageWithContext(ctx context.Context, id string) error {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("%s/%s", baseEndpoint, id))
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if err != nil {
+ return fmt.Errorf("error sending request to server: %w", err)
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+// UICustomMessageListRequest is a struct used to contain inputs for the List
+// custom messages request. Each field is optional, so their types are pointers.
+// The With... methods can be used to easily set the fields with pointers to
+// values.
+type UICustomMessageListRequest struct {
+ Authenticated *bool
+ Type *string
+ Active *bool
+}
+
+// WithAuthenticated sets the Authenticated field to a pointer referencing the
+// provided bool value.
+func (r *UICustomMessageListRequest) WithAuthenticated(value bool) *UICustomMessageListRequest {
+ r.Authenticated = &value
+
+ return r
+}
+
+// WithType sets the Type field to a pointer referencing the provided string
+// value.
+func (r *UICustomMessageListRequest) WithType(value string) *UICustomMessageListRequest {
+ r.Type = &value
+
+ return r
+}
+
+// WithActive sets the Active field to a pointer referencing the provided bool
+// value.
+func (r *UICustomMessageListRequest) WithActive(value bool) *UICustomMessageListRequest {
+ r.Active = &value
+
+ return r
+}
+
+// UICustomMessageRequest is a struct containing the properties of a custom
+// message. The Link field can be set using the WithLink method.
+type UICustomMessageRequest struct {
+ Title string `json:"title"`
+ Message string `json:"message"`
+ Authenticated bool `json:"authenticated"`
+ Type string `json:"type"`
+ StartTime string `json:"start_time"`
+ EndTime string `json:"end_time,omitempty"`
+ Link *uiCustomMessageLink `json:"link,omitempty"`
+ Options map[string]any `json:"options,omitempty"`
+}
+
+// WithLink sets the Link field to the address of a new uiCustomMessageLink
+// struct constructed from the provided title and href values.
+func (r *UICustomMessageRequest) WithLink(title, href string) *UICustomMessageRequest {
+ r.Link = &uiCustomMessageLink{
+ Title: title,
+ Href: href,
+ }
+
+ return r
+}
+
+// uiCustomMessageLink is a utility struct used to represent a link associated
+// with a custom message.
+type uiCustomMessageLink struct {
+ Title string
+ Href string
+}
+
+// MarshalJSON encodes the state of the receiver uiCustomMessageLink as JSON and
+// returns those encoded bytes or an error.
+func (l uiCustomMessageLink) MarshalJSON() ([]byte, error) {
+ m := make(map[string]string)
+
+ m[l.Title] = l.Href
+
+ return json.Marshal(m)
+}
+
+// UnmarshalJSON updates the state of the receiver uiCustomMessageLink from the
+// provided JSON encoded bytes. It returns an error if there was a failure.
+func (l *uiCustomMessageLink) UnmarshalJSON(b []byte) error {
+ m := make(map[string]string)
+
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+
+ for k, v := range m {
+ l.Title = k
+ l.Href = v
+ break
+ }
+
+ return nil
+}
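A hedged end-to-end sketch of the new UI custom message API (all field values are illustrative, and the timestamp format is assumed to be RFC 3339 as elsewhere in the Vault API):

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Create a banner-style message with an attached link.
	req := vault.UICustomMessageRequest{
		Title:         "Scheduled maintenance",
		Message:       "Vault will be briefly unavailable on Saturday.",
		Authenticated: true,
		Type:          "banner",
		StartTime:     "2024-01-01T00:00:00Z",
	}
	if _, err := client.Sys().CreateUICustomMessage(*req.WithLink("Status page", "https://status.example.com")); err != nil {
		log.Fatal(err)
	}

	// List only the currently active messages; the IDs are returned under the
	// conventional "keys" field of the LIST response.
	listReq := vault.UICustomMessageListRequest{}
	secret, err := client.Sys().ListUICustomMessages(*listReq.WithActive(true))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["keys"])
}
```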
diff --git a/vendor/github.com/jellydator/ttlcache/v3/cache.go b/vendor/github.com/jellydator/ttlcache/v3/cache.go
index 93943da226b..1ad3afbece4 100644
--- a/vendor/github.com/jellydator/ttlcache/v3/cache.go
+++ b/vendor/github.com/jellydator/ttlcache/v3/cache.go
@@ -148,6 +148,10 @@ func (c *Cache[K, V]) set(key K, value V, ttl time.Duration) *Item[K, V] {
c.evict(EvictionReasonCapacityReached, c.items.lru.Back())
}
+ if ttl == PreviousOrDefaultTTL {
+ ttl = c.options.ttl
+ }
+
// create a new item
item := newItem(key, value, ttl, c.options.enableVersionTracking)
elem = c.items.lru.PushFront(item)
@@ -478,6 +482,13 @@ func (c *Cache[K, V]) Items() map[K]*Item[K, V] {
// Range stops the iteration.
func (c *Cache[K, V]) Range(fn func(item *Item[K, V]) bool) {
c.items.mu.RLock()
+
+ // Check if cache is empty
+ if c.items.lru.Len() == 0 {
+ c.items.mu.RUnlock()
+ return
+ }
+
for item := c.items.lru.Front(); item != c.items.lru.Back().Next(); item = item.Next() {
i := item.Value.(*Item[K, V])
c.items.mu.RUnlock()
diff --git a/vendor/github.com/jellydator/ttlcache/v3/item.go b/vendor/github.com/jellydator/ttlcache/v3/item.go
index 72568e07e58..c3c26cf6ba0 100644
--- a/vendor/github.com/jellydator/ttlcache/v3/item.go
+++ b/vendor/github.com/jellydator/ttlcache/v3/item.go
@@ -9,6 +9,10 @@ const (
// NoTTL indicates that an item should never expire.
NoTTL time.Duration = -1
+	// PreviousOrDefaultTTL indicates that the item's existing TTL should be
+	// kept; the cache's default TTL is used as a fallback if the item
+	// doesn't exist yet.
+ PreviousOrDefaultTTL time.Duration = -2
+
// DefaultTTL indicates that the default TTL value of the cache
// instance should be used.
DefaultTTL time.Duration = 0
@@ -58,17 +62,23 @@ func (item *Item[K, V]) update(value V, ttl time.Duration) {
defer item.mu.Unlock()
item.value = value
+
+ // update version if enabled
+ if item.version > -1 {
+ item.version++
+ }
+
+ // no need to update ttl or expiry in this case
+ if ttl == PreviousOrDefaultTTL {
+ return
+ }
+
item.ttl = ttl
// reset expiration timestamp because the new TTL may be
// 0 or below
item.expiresAt = time.Time{}
item.touchUnsafe()
-
- // update version if enabled
- if item.version > -1 {
- item.version++
- }
}
// touch updates the item's expiration timestamp.
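A short sketch of the new PreviousOrDefaultTTL behavior, assuming the ttlcache v3 public API (New, WithTTL, Set, Value, TTL). On first insert it falls back to the default TTL; on update it preserves whatever TTL the item already had:

```go
package main

import (
	"fmt"
	"time"

	"github.com/jellydator/ttlcache/v3"
)

func main() {
	cache := ttlcache.New[string, string](
		ttlcache.WithTTL[string, string](30 * time.Second),
	)

	// The item does not exist yet, so PreviousOrDefaultTTL falls back to the
	// cache's default TTL (30s).
	cache.Set("session", "v1", ttlcache.PreviousOrDefaultTTL)

	// Give the item a custom TTL, then update only its value: the existing
	// 5-minute TTL is kept instead of being reset to the default.
	cache.Set("session", "v2", 5*time.Minute)
	item := cache.Set("session", "v3", ttlcache.PreviousOrDefaultTTL)

	fmt.Println(item.Value(), item.TTL()) // v3 5m0s
}
```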
diff --git a/vendor/github.com/letsencrypt/boulder/core/challenges.go b/vendor/github.com/letsencrypt/boulder/core/challenges.go
index 4b4a67c4868..1d7e2408d51 100644
--- a/vendor/github.com/letsencrypt/boulder/core/challenges.go
+++ b/vendor/github.com/letsencrypt/boulder/core/challenges.go
@@ -1,5 +1,7 @@
package core
+import "fmt"
+
func newChallenge(challengeType AcmeChallenge, token string) Challenge {
return Challenge{
Type: challengeType,
@@ -25,3 +27,19 @@ func DNSChallenge01(token string) Challenge {
func TLSALPNChallenge01(token string) Challenge {
return newChallenge(ChallengeTypeTLSALPN01, token)
}
+
+// NewChallenge constructs a challenge of the given kind using the provided
+// token. It returns an error if the challenge type is unrecognized.
+func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) {
+ switch kind {
+ case ChallengeTypeHTTP01:
+ return HTTPChallenge01(token), nil
+ case ChallengeTypeDNS01:
+ return DNSChallenge01(token), nil
+ case ChallengeTypeTLSALPN01:
+ return TLSALPNChallenge01(token), nil
+ default:
+ return Challenge{}, fmt.Errorf("unrecognized challenge type %q", kind)
+ }
+}
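A brief illustrative use of the exported constructor (the token value is a placeholder); unrecognized types surface as an error rather than a zero-value challenge:

```go
package main

import (
	"fmt"
	"log"

	"github.com/letsencrypt/boulder/core"
)

func main() {
	// The token is caller-supplied; the constructor only selects the type.
	chal, err := core.NewChallenge(core.ChallengeTypeDNS01, "example-token")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(chal.Type, chal.Token)

	// An unrecognized challenge type is reported as an error.
	if _, err := core.NewChallenge(core.AcmeChallenge("bogus-01"), "t"); err != nil {
		fmt.Println(err)
	}
}
```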
diff --git a/vendor/github.com/letsencrypt/boulder/core/interfaces.go b/vendor/github.com/letsencrypt/boulder/core/interfaces.go
index 6846bf88807..003329c3f55 100644
--- a/vendor/github.com/letsencrypt/boulder/core/interfaces.go
+++ b/vendor/github.com/letsencrypt/boulder/core/interfaces.go
@@ -7,7 +7,8 @@ import (
// PolicyAuthority defines the public interface for the Boulder PA
// TODO(#5891): Move this interface to a more appropriate location.
type PolicyAuthority interface {
- WillingToIssueWildcards(identifiers []identifier.ACMEIdentifier) error
- ChallengesFor(domain identifier.ACMEIdentifier) ([]Challenge, error)
- ChallengeTypeEnabled(t AcmeChallenge) bool
+ WillingToIssueWildcards([]identifier.ACMEIdentifier) error
+ ChallengesFor(identifier.ACMEIdentifier) ([]Challenge, error)
+ ChallengeTypeEnabled(AcmeChallenge) bool
+ CheckAuthz(*Authorization) error
}
diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go
index 47ba4b70c16..b52f0f5e0ab 100644
--- a/vendor/github.com/letsencrypt/boulder/core/objects.go
+++ b/vendor/github.com/letsencrypt/boulder/core/objects.go
@@ -2,7 +2,6 @@ package core
import (
"crypto"
- "crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
@@ -12,7 +11,7 @@ import (
"time"
"golang.org/x/crypto/ocsp"
- "gopkg.in/square/go-jose.v2"
+ "gopkg.in/go-jose/go-jose.v2"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
@@ -53,7 +52,6 @@ const (
type AcmeChallenge string
// These types are the available challenges
-// TODO(#5009): Make this a custom type as well.
const (
ChallengeTypeHTTP01 = AcmeChallenge("http-01")
ChallengeTypeDNS01 = AcmeChallenge("dns-01")
@@ -77,54 +75,25 @@ type OCSPStatus string
const (
OCSPStatusGood = OCSPStatus("good")
OCSPStatusRevoked = OCSPStatus("revoked")
+ // Not a real OCSP status. This is a placeholder we write before the
+ // actual precertificate is issued, to ensure we never return "good" before
+ // issuance succeeds, for BR compliance reasons.
+ OCSPStatusNotReady = OCSPStatus("wait")
)
var OCSPStatusToInt = map[OCSPStatus]int{
- OCSPStatusGood: ocsp.Good,
- OCSPStatusRevoked: ocsp.Revoked,
+ OCSPStatusGood: ocsp.Good,
+ OCSPStatusRevoked: ocsp.Revoked,
+ OCSPStatusNotReady: -1,
}
// DNSPrefix is attached to DNS names in DNS challenges
const DNSPrefix = "_acme-challenge"
-// CertificateRequest is just a CSR
-//
-// This data is unmarshalled from JSON by way of RawCertificateRequest, which
-// represents the actual structure received from the client.
-type CertificateRequest struct {
- CSR *x509.CertificateRequest // The CSR
- Bytes []byte // The original bytes of the CSR, for logging.
-}
-
type RawCertificateRequest struct {
CSR JSONBuffer `json:"csr"` // The encoded CSR
}
-// UnmarshalJSON provides an implementation for decoding CertificateRequest objects.
-func (cr *CertificateRequest) UnmarshalJSON(data []byte) error {
- var raw RawCertificateRequest
- err := json.Unmarshal(data, &raw)
- if err != nil {
- return err
- }
-
- csr, err := x509.ParseCertificateRequest(raw.CSR)
- if err != nil {
- return err
- }
-
- cr.CSR = csr
- cr.Bytes = raw.CSR
- return nil
-}
-
-// MarshalJSON provides an implementation for encoding CertificateRequest objects.
-func (cr CertificateRequest) MarshalJSON() ([]byte, error) {
- return json.Marshal(RawCertificateRequest{
- CSR: cr.CSR.Raw,
- })
-}
-
// Registration objects represent non-public metadata attached
// to account keys.
type Registration struct {
@@ -156,7 +125,7 @@ type ValidationRecord struct {
URL string `json:"url,omitempty"`
// Shared
- Hostname string `json:"hostname"`
+ Hostname string `json:"hostname,omitempty"`
Port string `json:"port,omitempty"`
AddressesResolved []net.IP `json:"addressesResolved,omitempty"`
AddressUsed net.IP `json:"addressUsed,omitempty"`
@@ -373,14 +342,18 @@ type Authorization struct {
// slice and the order of these challenges may not be predictable.
Challenges []Challenge `json:"challenges,omitempty" db:"-"`
- // This field is deprecated. It's filled in by WFE for the ACMEv1 API.
- Combinations [][]int `json:"combinations,omitempty" db:"combinations"`
-
- // Wildcard is a Boulder-specific Authorization field that indicates the
- // authorization was created as a result of an order containing a name with
- // a `*.`wildcard prefix. This will help convey to users that an
- // Authorization with the identifier `example.com` and one DNS-01 challenge
- // corresponds to a name `*.example.com` from an associated order.
+ // https://datatracker.ietf.org/doc/html/rfc8555#page-29
+ //
+ // wildcard (optional, boolean): This field MUST be present and true
+ // for authorizations created as a result of a newOrder request
+ // containing a DNS identifier with a value that was a wildcard
+ // domain name. For other authorizations, it MUST be absent.
+ // Wildcard domain names are described in Section 7.1.3.
+ //
+ // This is not represented in the database because we calculate it from
+ // the identifier stored in the database. Unlike the identifier returned
+ // as part of the authorization, the identifier we store in the database
+ // can contain an asterisk.
Wildcard bool `json:"wildcard,omitempty" db:"-"`
}
@@ -399,38 +372,25 @@ func (authz *Authorization) FindChallengeByStringID(id string) int {
// SolvedBy will look through the Authorizations challenges, returning the type
// of the *first* challenge it finds with Status: valid, or an error if no
// challenge is valid.
-func (authz *Authorization) SolvedBy() (*AcmeChallenge, error) {
+func (authz *Authorization) SolvedBy() (AcmeChallenge, error) {
if len(authz.Challenges) == 0 {
- return nil, fmt.Errorf("Authorization has no challenges")
+ return "", fmt.Errorf("Authorization has no challenges")
}
for _, chal := range authz.Challenges {
if chal.Status == StatusValid {
- return &chal.Type, nil
+ return chal.Type, nil
}
}
- return nil, fmt.Errorf("Authorization not solved by any challenge")
+ return "", fmt.Errorf("Authorization not solved by any challenge")
}
// JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding
// with stripped padding.
type JSONBuffer []byte
-// URL-safe base64 encode that strips padding
-func base64URLEncode(data []byte) string {
- var result = base64.URLEncoding.EncodeToString(data)
- return strings.TrimRight(result, "=")
-}
-
-// URL-safe base64 decoder that adds padding
-func base64URLDecode(data string) ([]byte, error) {
- var missing = (4 - len(data)%4) % 4
- data += strings.Repeat("=", missing)
- return base64.URLEncoding.DecodeString(data)
-}
-
// MarshalJSON encodes a JSONBuffer for transmission.
func (jb JSONBuffer) MarshalJSON() (result []byte, err error) {
- return json.Marshal(base64URLEncode(jb))
+ return json.Marshal(base64.RawURLEncoding.EncodeToString(jb))
}
// UnmarshalJSON decodes a JSONBuffer to an object.
@@ -440,7 +400,7 @@ func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) {
if err != nil {
return err
}
- *jb, err = base64URLDecode(str)
+ *jb, err = base64.RawURLEncoding.DecodeString(strings.TrimRight(str, "="))
return
}
@@ -458,53 +418,46 @@ type Certificate struct {
}
// CertificateStatus structs are internal to the server. They represent the
-// latest data about the status of the certificate, required for OCSP updating
-// and for validating that the subscriber has accepted the certificate.
+// latest data about the status of the certificate, required for generating new
+// OCSP responses and determining if a certificate has been revoked.
type CertificateStatus struct {
ID int64 `db:"id"`
Serial string `db:"serial"`
// status: 'good' or 'revoked'. Note that good, expired certificates remain
- // with status 'good' but don't necessarily get fresh OCSP responses.
+ // with status 'good' but don't necessarily get fresh OCSP responses.
Status OCSPStatus `db:"status"`
// ocspLastUpdated: The date and time of the last time we generated an OCSP
- // response. If we have never generated one, this has the zero value of
- // time.Time, i.e. Jan 1 1970.
+ // response. If we have never generated one, this has the zero value of
+ // time.Time, i.e. Jan 1 1970.
OCSPLastUpdated time.Time `db:"ocspLastUpdated"`
// revokedDate: If status is 'revoked', this is the date and time it was
- // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970.
+ // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970.
RevokedDate time.Time `db:"revokedDate"`
// revokedReason: If status is 'revoked', this is the reason code for the
- // revocation. Otherwise it is zero (which happens to be the reason
- // code for 'unspecified').
+ // revocation. Otherwise it is zero (which happens to be the reason
+ // code for 'unspecified').
RevokedReason revocation.Reason `db:"revokedReason"`
LastExpirationNagSent time.Time `db:"lastExpirationNagSent"`
- // The encoded and signed OCSP response.
- OCSPResponse []byte `db:"ocspResponse"`
-
- // For performance reasons[0] we duplicate the `Expires` field of the
- // `Certificates` object/table in `CertificateStatus` to avoid a costly `JOIN`
- // later on just to retrieve this `Time` value. This helps both the OCSP
- // updater and the expiration-mailer stay performant.
- //
- // Similarly, we add an explicit `IsExpired` boolean to `CertificateStatus`
- // table that the OCSP updater so that the database can create a meaningful
- // index on `(isExpired, ocspLastUpdated)` without a `JOIN` on `certificates`.
- // For more detail see Boulder #1864[0].
- //
- // [0]: https://github.com/letsencrypt/boulder/issues/1864
+ // NotAfter and IsExpired are convenience columns which allow expensive
+ // queries to quickly filter out certificates that we don't need to care about
+ // anymore. These are particularly useful for the expiration mailer and CRL
+ // updater. See https://github.com/letsencrypt/boulder/issues/1864.
NotAfter time.Time `db:"notAfter"`
IsExpired bool `db:"isExpired"`
- // TODO(#5152): Change this to an issuance.Issuer(Name)ID after it no longer
- // has to support both IssuerNameIDs and IssuerIDs.
- IssuerID int64
+ // Note: this is not an issuance.IssuerNameID because that would create an
+ // import cycle between core and issuance.
+ // Note2: This field used to be called `issuerID`. We keep the old name in
+ // the DB, but update the Go field name to be clear which type of ID this
+ // is.
+ IssuerNameID int64 `db:"issuerID"`
}
// FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames
@@ -553,7 +506,7 @@ func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo {
}
// RenewalInfoImmediate constructs a `RenewalInfo` object with a suggested
-// window in the past. Per the draft-ietf-acme-ari-00 spec, clients should
+// window in the past. Per the draft-ietf-acme-ari-01 spec, clients should
// attempt to renew immediately if the suggested window is in the past. The
// passed `now` is assumed to be a timestamp representing the current moment in
// time.
diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go b/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go
deleted file mode 100644
index 02b3515bdcd..00000000000
--- a/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go
+++ /dev/null
@@ -1,1182 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.28.0
-// protoc v3.20.1
-// source: core.proto
-
-package proto
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type Challenge struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- Status string `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"`
- Uri string `protobuf:"bytes,9,opt,name=uri,proto3" json:"uri,omitempty"`
- Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"`
- KeyAuthorization string `protobuf:"bytes,5,opt,name=keyAuthorization,proto3" json:"keyAuthorization,omitempty"`
- Validationrecords []*ValidationRecord `protobuf:"bytes,10,rep,name=validationrecords,proto3" json:"validationrecords,omitempty"`
- Error *ProblemDetails `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"`
- Validated int64 `protobuf:"varint,11,opt,name=validated,proto3" json:"validated,omitempty"`
-}
-
-func (x *Challenge) Reset() {
- *x = Challenge{}
- if protoimpl.UnsafeEnabled {
- mi := &file_core_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Challenge) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Challenge) ProtoMessage() {}
-
-func (x *Challenge) ProtoReflect() protoreflect.Message {
- mi := &file_core_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Challenge.ProtoReflect.Descriptor instead.
-func (*Challenge) Descriptor() ([]byte, []int) {
- return file_core_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Challenge) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *Challenge) GetType() string {
- if x != nil {
- return x.Type
- }
- return ""
-}
-
-func (x *Challenge) GetStatus() string {
- if x != nil {
- return x.Status
- }
- return ""
-}
-
-func (x *Challenge) GetUri() string {
- if x != nil {
- return x.Uri
- }
- return ""
-}
-
-func (x *Challenge) GetToken() string {
- if x != nil {
- return x.Token
- }
- return ""
-}
-
-func (x *Challenge) GetKeyAuthorization() string {
- if x != nil {
- return x.KeyAuthorization
- }
- return ""
-}
-
-func (x *Challenge) GetValidationrecords() []*ValidationRecord {
- if x != nil {
- return x.Validationrecords
- }
- return nil
-}
-
-func (x *Challenge) GetError() *ProblemDetails {
- if x != nil {
- return x.Error
- }
- return nil
-}
-
-func (x *Challenge) GetValidated() int64 {
- if x != nil {
- return x.Validated
- }
- return 0
-}
-
-type ValidationRecord struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
- Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"`
- AddressesResolved [][]byte `protobuf:"bytes,3,rep,name=addressesResolved,proto3" json:"addressesResolved,omitempty"` // net.IP.MarshalText()
- AddressUsed []byte `protobuf:"bytes,4,opt,name=addressUsed,proto3" json:"addressUsed,omitempty"` // net.IP.MarshalText()
- Authorities []string `protobuf:"bytes,5,rep,name=authorities,proto3" json:"authorities,omitempty"`
- Url string `protobuf:"bytes,6,opt,name=url,proto3" json:"url,omitempty"`
- // A list of addresses tried before the address used (see
- // core/objects.go and the comment on the ValidationRecord structure
- // definition for more information.
- AddressesTried [][]byte `protobuf:"bytes,7,rep,name=addressesTried,proto3" json:"addressesTried,omitempty"` // net.IP.MarshalText()
-}
-
-func (x *ValidationRecord) Reset() {
- *x = ValidationRecord{}
- if protoimpl.UnsafeEnabled {
- mi := &file_core_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidationRecord) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidationRecord) ProtoMessage() {}
-
-func (x *ValidationRecord) ProtoReflect() protoreflect.Message {
- mi := &file_core_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidationRecord.ProtoReflect.Descriptor instead.
-func (*ValidationRecord) Descriptor() ([]byte, []int) {
- return file_core_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *ValidationRecord) GetHostname() string {
- if x != nil {
- return x.Hostname
- }
- return ""
-}
-
-func (x *ValidationRecord) GetPort() string {
- if x != nil {
- return x.Port
- }
- return ""
-}
-
-func (x *ValidationRecord) GetAddressesResolved() [][]byte {
- if x != nil {
- return x.AddressesResolved
- }
- return nil
-}
-
-func (x *ValidationRecord) GetAddressUsed() []byte {
- if x != nil {
- return x.AddressUsed
- }
- return nil
-}
-
-func (x *ValidationRecord) GetAuthorities() []string {
- if x != nil {
- return x.Authorities
- }
- return nil
-}
-
-func (x *ValidationRecord) GetUrl() string {
- if x != nil {
- return x.Url
- }
- return ""
-}
-
-func (x *ValidationRecord) GetAddressesTried() [][]byte {
- if x != nil {
- return x.AddressesTried
- }
- return nil
-}
-
-type ProblemDetails struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ProblemType string `protobuf:"bytes,1,opt,name=problemType,proto3" json:"problemType,omitempty"`
- Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
- HttpStatus int32 `protobuf:"varint,3,opt,name=httpStatus,proto3" json:"httpStatus,omitempty"`
-}
-
-func (x *ProblemDetails) Reset() {
- *x = ProblemDetails{}
- if protoimpl.UnsafeEnabled {
- mi := &file_core_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ProblemDetails) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ProblemDetails) ProtoMessage() {}
-
-func (x *ProblemDetails) ProtoReflect() protoreflect.Message {
- mi := &file_core_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ProblemDetails.ProtoReflect.Descriptor instead.
-func (*ProblemDetails) Descriptor() ([]byte, []int) {
- return file_core_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ProblemDetails) GetProblemType() string {
- if x != nil {
- return x.ProblemType
- }
- return ""
-}
-
-func (x *ProblemDetails) GetDetail() string {
- if x != nil {
- return x.Detail
- }
- return ""
-}
-
-func (x *ProblemDetails) GetHttpStatus() int32 {
- if x != nil {
- return x.HttpStatus
- }
- return 0
-}
-
-type Certificate struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"`
- Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"`
- Der []byte `protobuf:"bytes,4,opt,name=der,proto3" json:"der,omitempty"`
- Issued int64 `protobuf:"varint,5,opt,name=issued,proto3" json:"issued,omitempty"` // Unix timestamp (nanoseconds)
- Expires int64 `protobuf:"varint,6,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *Certificate) Reset() {
- *x = Certificate{}
- if protoimpl.UnsafeEnabled {
- mi := &file_core_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Certificate) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Certificate) ProtoMessage() {}
-
-func (x *Certificate) ProtoReflect() protoreflect.Message {
- mi := &file_core_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Certificate.ProtoReflect.Descriptor instead.
-func (*Certificate) Descriptor() ([]byte, []int) {
- return file_core_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *Certificate) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *Certificate) GetSerial() string {
- if x != nil {
- return x.Serial
- }
- return ""
-}
-
-func (x *Certificate) GetDigest() string {
- if x != nil {
- return x.Digest
- }
- return ""
-}
-
-func (x *Certificate) GetDer() []byte {
- if x != nil {
- return x.Der
- }
- return nil
-}
-
-func (x *Certificate) GetIssued() int64 {
- if x != nil {
- return x.Issued
- }
- return 0
-}
-
-func (x *Certificate) GetExpires() int64 {
- if x != nil {
- return x.Expires
- }
- return 0
-}
-
-type CertificateStatus struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
- Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
- OcspLastUpdated int64 `protobuf:"varint,4,opt,name=ocspLastUpdated,proto3" json:"ocspLastUpdated,omitempty"`
- RevokedDate int64 `protobuf:"varint,5,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"`
- RevokedReason int64 `protobuf:"varint,6,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"`
- LastExpirationNagSent int64 `protobuf:"varint,7,opt,name=lastExpirationNagSent,proto3" json:"lastExpirationNagSent,omitempty"`
- OcspResponse []byte `protobuf:"bytes,8,opt,name=ocspResponse,proto3" json:"ocspResponse,omitempty"`
- NotAfter int64 `protobuf:"varint,9,opt,name=notAfter,proto3" json:"notAfter,omitempty"`
- IsExpired bool `protobuf:"varint,10,opt,name=isExpired,proto3" json:"isExpired,omitempty"`
- IssuerID int64 `protobuf:"varint,11,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
-}
-
-func (x *CertificateStatus) Reset() {
- *x = CertificateStatus{}
- if protoimpl.UnsafeEnabled {
- mi := &file_core_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CertificateStatus) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CertificateStatus) ProtoMessage() {}
-
-func (x *CertificateStatus) ProtoReflect() protoreflect.Message {
- mi := &file_core_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CertificateStatus.ProtoReflect.Descriptor instead.
-func (*CertificateStatus) Descriptor() ([]byte, []int) {
- return file_core_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *CertificateStatus) GetSerial() string {
- if x != nil {
- return x.Serial
- }
- return ""
-}
-
-func (x *CertificateStatus) GetStatus() string {
- if x != nil {
- return x.Status
- }
- return ""
-}
-
-func (x *CertificateStatus) GetOcspLastUpdated() int64 {
- if x != nil {
- return x.OcspLastUpdated
- }
- return 0
-}
-
-func (x *CertificateStatus) GetRevokedDate() int64 {
- if x != nil {
- return x.RevokedDate
- }
- return 0
-}
-
-func (x *CertificateStatus) GetRevokedReason() int64 {
- if x != nil {
- return x.RevokedReason
- }
- return 0
-}
-
-func (x *CertificateStatus) GetLastExpirationNagSent() int64 {
- if x != nil {
- return x.LastExpirationNagSent
- }
- return 0
-}
-
-func (x *CertificateStatus) GetOcspResponse() []byte {
- if x != nil {
- return x.OcspResponse
- }
- return nil
-}
-
-func (x *CertificateStatus) GetNotAfter() int64 {
- if x != nil {
- return x.NotAfter
- }
- return 0
-}
-
-func (x *CertificateStatus) GetIsExpired() bool {
- if x != nil {
- return x.IsExpired
- }
- return false
-}
-
-func (x *CertificateStatus) GetIssuerID() int64 {
- if x != nil {
- return x.IssuerID
- }
- return 0
-}
-
-type Registration struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
- Contact []string `protobuf:"bytes,3,rep,name=contact,proto3" json:"contact,omitempty"`
- ContactsPresent bool `protobuf:"varint,4,opt,name=contactsPresent,proto3" json:"contactsPresent,omitempty"`
- Agreement string `protobuf:"bytes,5,opt,name=agreement,proto3" json:"agreement,omitempty"`
- InitialIP []byte `protobuf:"bytes,6,opt,name=initialIP,proto3" json:"initialIP,omitempty"`
- CreatedAt int64 `protobuf:"varint,7,opt,name=createdAt,proto3" json:"createdAt,omitempty"` // Unix timestamp (nanoseconds)
- Status string `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
-}
-
-func (x *Registration) Reset() {
- *x = Registration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_core_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Registration) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Registration) ProtoMessage() {}
-
-func (x *Registration) ProtoReflect() protoreflect.Message {
- mi := &file_core_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Registration.ProtoReflect.Descriptor instead.
-func (*Registration) Descriptor() ([]byte, []int) {
- return file_core_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *Registration) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *Registration) GetKey() []byte {
- if x != nil {
- return x.Key
- }
- return nil
-}
-
-func (x *Registration) GetContact() []string {
- if x != nil {
- return x.Contact
- }
- return nil
-}
-
-func (x *Registration) GetContactsPresent() bool {
- if x != nil {
- return x.ContactsPresent
- }
- return false
-}
-
-func (x *Registration) GetAgreement() string {
- if x != nil {
- return x.Agreement
- }
- return ""
-}
-
-func (x *Registration) GetInitialIP() []byte {
- if x != nil {
- return x.InitialIP
- }
- return nil
-}
-
-func (x *Registration) GetCreatedAt() int64 {
- if x != nil {
- return x.CreatedAt
- }
- return 0
-}
-
-func (x *Registration) GetStatus() string {
- if x != nil {
- return x.Status
- }
- return ""
-}
-
-type Authorization struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- Identifier string `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"`
- RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
- Expires int64 `protobuf:"varint,5,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
- Challenges []*Challenge `protobuf:"bytes,6,rep,name=challenges,proto3" json:"challenges,omitempty"`
-}
-
-func (x *Authorization) Reset() {
- *x = Authorization{}
- if protoimpl.UnsafeEnabled {
- mi := &file_core_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Authorization) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Authorization) ProtoMessage() {}
-
-func (x *Authorization) ProtoReflect() protoreflect.Message {
- mi := &file_core_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Authorization.ProtoReflect.Descriptor instead.
-func (*Authorization) Descriptor() ([]byte, []int) {
- return file_core_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *Authorization) GetId() string {
- if x != nil {
- return x.Id
- }
- return ""
-}
-
-func (x *Authorization) GetIdentifier() string {
- if x != nil {
- return x.Identifier
- }
- return ""
-}
-
-func (x *Authorization) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *Authorization) GetStatus() string {
- if x != nil {
- return x.Status
- }
- return ""
-}
-
-func (x *Authorization) GetExpires() int64 {
- if x != nil {
- return x.Expires
- }
- return 0
-}
-
-func (x *Authorization) GetChallenges() []*Challenge {
- if x != nil {
- return x.Challenges
- }
- return nil
-}
-
-type Order struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"`
- Error *ProblemDetails `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
- CertificateSerial string `protobuf:"bytes,5,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"`
- Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"`
- Names []string `protobuf:"bytes,8,rep,name=names,proto3" json:"names,omitempty"`
- BeganProcessing bool `protobuf:"varint,9,opt,name=beganProcessing,proto3" json:"beganProcessing,omitempty"`
- Created int64 `protobuf:"varint,10,opt,name=created,proto3" json:"created,omitempty"`
- V2Authorizations []int64 `protobuf:"varint,11,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"`
-}
-
-func (x *Order) Reset() {
- *x = Order{}
- if protoimpl.UnsafeEnabled {
- mi := &file_core_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Order) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Order) ProtoMessage() {}
-
-func (x *Order) ProtoReflect() protoreflect.Message {
- mi := &file_core_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Order.ProtoReflect.Descriptor instead.
-func (*Order) Descriptor() ([]byte, []int) {
- return file_core_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *Order) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *Order) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *Order) GetExpires() int64 {
- if x != nil {
- return x.Expires
- }
- return 0
-}
-
-func (x *Order) GetError() *ProblemDetails {
- if x != nil {
- return x.Error
- }
- return nil
-}
-
-func (x *Order) GetCertificateSerial() string {
- if x != nil {
- return x.CertificateSerial
- }
- return ""
-}
-
-func (x *Order) GetStatus() string {
- if x != nil {
- return x.Status
- }
- return ""
-}
-
-func (x *Order) GetNames() []string {
- if x != nil {
- return x.Names
- }
- return nil
-}
-
-func (x *Order) GetBeganProcessing() bool {
- if x != nil {
- return x.BeganProcessing
- }
- return false
-}
-
-func (x *Order) GetCreated() int64 {
- if x != nil {
- return x.Created
- }
- return 0
-}
-
-func (x *Order) GetV2Authorizations() []int64 {
- if x != nil {
- return x.V2Authorizations
- }
- return nil
-}
-
-type CRLEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
- Reason int32 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"`
- RevokedAt int64 `protobuf:"varint,3,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *CRLEntry) Reset() {
- *x = CRLEntry{}
- if protoimpl.UnsafeEnabled {
- mi := &file_core_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CRLEntry) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CRLEntry) ProtoMessage() {}
-
-func (x *CRLEntry) ProtoReflect() protoreflect.Message {
- mi := &file_core_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CRLEntry.ProtoReflect.Descriptor instead.
-func (*CRLEntry) Descriptor() ([]byte, []int) {
- return file_core_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *CRLEntry) GetSerial() string {
- if x != nil {
- return x.Serial
- }
- return ""
-}
-
-func (x *CRLEntry) GetReason() int32 {
- if x != nil {
- return x.Reason
- }
- return 0
-}
-
-func (x *CRLEntry) GetRevokedAt() int64 {
- if x != nil {
- return x.RevokedAt
- }
- return 0
-}
-
-var File_core_proto protoreflect.FileDescriptor
-
-var file_core_proto_rawDesc = []byte{
- 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x6f,
- 0x72, 0x65, 0x22, 0xab, 0x02, 0x0a, 0x09, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65,
- 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64,
- 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03,
- 0x75, 0x72, 0x69, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x14,
- 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
- 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f,
- 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10,
- 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72, 0x65,
- 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f,
- 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63,
- 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72,
- 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f,
- 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72,
- 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64,
- 0x22, 0xee, 0x01, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
- 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c,
- 0x52, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c,
- 0x76, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x55, 0x73,
- 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
- 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
- 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x64, 0x64,
- 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28,
- 0x0c, 0x52, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65,
- 0x64, 0x22, 0x6a, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61,
- 0x69, 0x6c, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x54, 0x79,
- 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65,
- 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1e, 0x0a,
- 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xa9, 0x01,
- 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a,
- 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a,
- 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64,
- 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65,
- 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12,
- 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0xeb, 0x02, 0x0a, 0x11, 0x43, 0x65,
- 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
- 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
- 0x28, 0x0a, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61,
- 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x76,
- 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b,
- 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x72,
- 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f,
- 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f,
- 0x63, 0x73, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6e,
- 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e,
- 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x45, 0x78, 0x70,
- 0x69, 0x72, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x45, 0x78,
- 0x70, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49,
- 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49,
- 0x44, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xe6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69,
- 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f,
- 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e,
- 0x74, 0x61, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73,
- 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63,
- 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x12, 0x1c,
- 0x0a, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09,
- 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x22, 0xd6, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
- 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69,
- 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69,
- 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x0a,
- 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67,
- 0x65, 0x52, 0x0a, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x4a, 0x04, 0x08,
- 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0xd7, 0x02, 0x0a, 0x05, 0x4f, 0x72,
- 0x64, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67,
- 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x65,
- 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78,
- 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62,
- 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65,
- 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12,
- 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73,
- 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x28, 0x0a,
- 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67,
- 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f,
- 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41,
- 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08,
- 0x06, 0x10, 0x07, 0x22, 0x58, 0x0a, 0x08, 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
- 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f,
- 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12,
- 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x42, 0x2b, 0x5a,
- 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73,
- 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f,
- 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
-}
-
-var (
- file_core_proto_rawDescOnce sync.Once
- file_core_proto_rawDescData = file_core_proto_rawDesc
-)
-
-func file_core_proto_rawDescGZIP() []byte {
- file_core_proto_rawDescOnce.Do(func() {
- file_core_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_proto_rawDescData)
- })
- return file_core_proto_rawDescData
-}
-
-var file_core_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
-var file_core_proto_goTypes = []interface{}{
- (*Challenge)(nil), // 0: core.Challenge
- (*ValidationRecord)(nil), // 1: core.ValidationRecord
- (*ProblemDetails)(nil), // 2: core.ProblemDetails
- (*Certificate)(nil), // 3: core.Certificate
- (*CertificateStatus)(nil), // 4: core.CertificateStatus
- (*Registration)(nil), // 5: core.Registration
- (*Authorization)(nil), // 6: core.Authorization
- (*Order)(nil), // 7: core.Order
- (*CRLEntry)(nil), // 8: core.CRLEntry
-}
-var file_core_proto_depIdxs = []int32{
- 1, // 0: core.Challenge.validationrecords:type_name -> core.ValidationRecord
- 2, // 1: core.Challenge.error:type_name -> core.ProblemDetails
- 0, // 2: core.Authorization.challenges:type_name -> core.Challenge
- 2, // 3: core.Order.error:type_name -> core.ProblemDetails
- 4, // [4:4] is the sub-list for method output_type
- 4, // [4:4] is the sub-list for method input_type
- 4, // [4:4] is the sub-list for extension type_name
- 4, // [4:4] is the sub-list for extension extendee
- 0, // [0:4] is the sub-list for field type_name
-}
-
-func init() { file_core_proto_init() }
-func file_core_proto_init() {
- if File_core_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_core_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Challenge); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_core_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidationRecord); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_core_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ProblemDetails); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_core_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Certificate); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_core_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CertificateStatus); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_core_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Registration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_core_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Authorization); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_core_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Order); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_core_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CRLEntry); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_core_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 9,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_core_proto_goTypes,
- DependencyIndexes: file_core_proto_depIdxs,
- MessageInfos: file_core_proto_msgTypes,
- }.Build()
- File_core_proto = out.File
- file_core_proto_rawDesc = nil
- file_core_proto_goTypes = nil
- file_core_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.proto b/vendor/github.com/letsencrypt/boulder/core/proto/core.proto
deleted file mode 100644
index 946bb16c875..00000000000
--- a/vendor/github.com/letsencrypt/boulder/core/proto/core.proto
+++ /dev/null
@@ -1,101 +0,0 @@
-syntax = "proto3";
-
-package core;
-option go_package = "github.com/letsencrypt/boulder/core/proto";
-
-message Challenge {
- int64 id = 1;
- string type = 2;
- string status = 6;
- string uri = 9;
- string token = 3;
- string keyAuthorization = 5;
- repeated ValidationRecord validationrecords = 10;
- ProblemDetails error = 7;
- int64 validated = 11;
-}
-
-message ValidationRecord {
- string hostname = 1;
- string port = 2;
- repeated bytes addressesResolved = 3; // net.IP.MarshalText()
- bytes addressUsed = 4; // net.IP.MarshalText()
-
- repeated string authorities = 5;
- string url = 6;
- // A list of addresses tried before the address used (see
- // core/objects.go and the comment on the ValidationRecord structure
- // definition for more information.)
- repeated bytes addressesTried = 7; // net.IP.MarshalText()
-}
-
-message ProblemDetails {
- string problemType = 1;
- string detail = 2;
- int32 httpStatus = 3;
-}
-
-message Certificate {
- int64 registrationID = 1;
- string serial = 2;
- string digest = 3;
- bytes der = 4;
- int64 issued = 5; // Unix timestamp (nanoseconds)
- int64 expires = 6; // Unix timestamp (nanoseconds)
-}
-
-message CertificateStatus {
- string serial = 1;
- reserved 2; // previously subscriberApproved
- string status = 3;
- int64 ocspLastUpdated = 4;
- int64 revokedDate = 5;
- int64 revokedReason = 6;
- int64 lastExpirationNagSent = 7;
- bytes ocspResponse = 8;
- int64 notAfter = 9;
- bool isExpired = 10;
- int64 issuerID = 11;
-}
-
-message Registration {
- int64 id = 1;
- bytes key = 2;
- repeated string contact = 3;
- bool contactsPresent = 4;
- string agreement = 5;
- bytes initialIP = 6;
- int64 createdAt = 7; // Unix timestamp (nanoseconds)
- string status = 8;
-}
-
-message Authorization {
- string id = 1;
- string identifier = 2;
- int64 registrationID = 3;
- string status = 4;
- int64 expires = 5; // Unix timestamp (nanoseconds)
- repeated core.Challenge challenges = 6;
- reserved 7; // previously combinations
- reserved 8; // previously v2
-}
-
-message Order {
- int64 id = 1;
- int64 registrationID = 2;
- int64 expires = 3;
- ProblemDetails error = 4;
- string certificateSerial = 5;
- reserved 6; // previously authorizations, deprecated in favor of v2Authorizations
- string status = 7;
- repeated string names = 8;
- bool beganProcessing = 9;
- int64 created = 10;
- repeated int64 v2Authorizations = 11;
-}
-
-message CRLEntry {
- string serial = 1;
- int32 reason = 2;
- int64 revokedAt = 3; // Unix timestamp (nanoseconds)
-}
diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go
index b9ac7047aa1..d7fe0266895 100644
--- a/vendor/github.com/letsencrypt/boulder/core/util.go
+++ b/vendor/github.com/letsencrypt/boulder/core/util.go
@@ -1,9 +1,10 @@
package core
import (
- "bytes"
"crypto"
+ "crypto/ecdsa"
"crypto/rand"
+ "crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
@@ -16,6 +17,7 @@ import (
"math/big"
mrand "math/rand"
"os"
+ "path"
"reflect"
"regexp"
"sort"
@@ -23,7 +25,7 @@ import (
"time"
"unicode"
- jose "gopkg.in/square/go-jose.v2"
+ "gopkg.in/go-jose/go-jose.v2"
)
const Unspecified = "Unspecified"
@@ -96,7 +98,7 @@ func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) {
switch t := key.(type) {
case *jose.JSONWebKey:
if t == nil {
- return Sha256Digest{}, fmt.Errorf("Cannot compute digest of nil key")
+ return Sha256Digest{}, errors.New("cannot compute digest of nil key")
}
return KeyDigest(t.Key)
case jose.JSONWebKey:
@@ -132,21 +134,16 @@ func KeyDigestEquals(j, k crypto.PublicKey) bool {
return digestJ == digestK
}
-// PublicKeysEqual determines whether two public keys have the same marshalled
-// bytes as one another
-func PublicKeysEqual(a, b interface{}) (bool, error) {
- if a == nil || b == nil {
- return false, errors.New("One or more nil arguments to PublicKeysEqual")
- }
- aBytes, err := x509.MarshalPKIXPublicKey(a)
- if err != nil {
- return false, err
- }
- bBytes, err := x509.MarshalPKIXPublicKey(b)
- if err != nil {
- return false, err
+// PublicKeysEqual determines whether two public keys are identical.
+func PublicKeysEqual(a, b crypto.PublicKey) (bool, error) {
+ switch ak := a.(type) {
+ case *rsa.PublicKey:
+ return ak.Equal(b), nil
+ case *ecdsa.PublicKey:
+ return ak.Equal(b), nil
+ default:
+ return false, fmt.Errorf("unsupported public key type %T", ak)
}
- return bytes.Equal(aBytes, bBytes), nil
}
// SerialToString converts a certificate serial number (big.Int) to a String
@@ -160,7 +157,7 @@ func SerialToString(serial *big.Int) string {
func StringToSerial(serial string) (*big.Int, error) {
var serialNum big.Int
if !ValidSerial(serial) {
- return &serialNum, errors.New("Invalid serial number")
+ return &serialNum, fmt.Errorf("invalid serial number %q", serial)
}
_, err := fmt.Sscanf(serial, "%036x", &serialNum)
return &serialNum, err
@@ -245,6 +242,14 @@ func UniqueLowerNames(names []string) (unique []string) {
return
}
+// HashNames returns a hash of the names requested. This is intended for use
+// when interacting with the orderFqdnSets table and rate limiting.
+func HashNames(names []string) []byte {
+ names = UniqueLowerNames(names)
+ hash := sha256.Sum256([]byte(strings.Join(names, ",")))
+ return hash[:]
+}
+
// LoadCert loads a PEM certificate specified by filename or returns an error
func LoadCert(filename string) (*x509.Certificate, error) {
certPEM, err := os.ReadFile(filename)
@@ -253,7 +258,7 @@ func LoadCert(filename string) (*x509.Certificate, error) {
}
block, _ := pem.Decode(certPEM)
if block == nil {
- return nil, fmt.Errorf("No data in cert PEM file %s", filename)
+ return nil, fmt.Errorf("no data in cert PEM file %q", filename)
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
@@ -298,3 +303,7 @@ func IsASCII(str string) bool {
}
return true
}
+
+func Command() string {
+ return path.Base(os.Args[0])
+}
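
Note on the util.go changes above: PublicKeysEqual now compares keys via the standard library's Equal methods on *rsa.PublicKey and *ecdsa.PublicKey instead of comparing PKIX-marshalled bytes, and the new HashNames helper canonicalizes names through UniqueLowerNames before hashing. A minimal sketch of how a caller might exercise the new behavior; the generated keys and example names are illustrative, not part of this change:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/letsencrypt/boulder/core"
)

func main() {
	// Two distinct ECDSA keys: the Equal-based comparison reports false
	// without marshalling either key to PKIX bytes first.
	k1, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	k2, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	same, err := core.PublicKeysEqual(&k1.PublicKey, &k2.PublicKey)
	fmt.Println(same, err) // false <nil>

	// HashNames lower-cases and de-duplicates the names before hashing, so
	// these two inputs yield the same digest.
	a := core.HashNames([]string{"Example.COM", "www.example.com"})
	b := core.HashNames([]string{"example.com", "www.example.com"})
	fmt.Println(string(a) == string(b)) // true
}
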
diff --git a/vendor/github.com/letsencrypt/boulder/errors/errors.go b/vendor/github.com/letsencrypt/boulder/errors/errors.go
deleted file mode 100644
index 50be1087a09..00000000000
--- a/vendor/github.com/letsencrypt/boulder/errors/errors.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Package errors provides internal-facing error types for use in Boulder. Many
-// of these are transformed directly into Problem Details documents by the WFE.
-// Some, like NotFound, may be handled internally. We avoid using Problem
-// Details documents as part of our internal error system to avoid layering
-// confusions.
-//
-// These errors are specifically for use in errors that cross RPC boundaries.
-// An error type that does not need to be passed through an RPC can use a plain
-// Go type locally. Our gRPC code is aware of these error types and will
-// serialize and deserialize them automatically.
-package errors
-
-import (
- "fmt"
- "time"
-
- "github.com/letsencrypt/boulder/identifier"
-)
-
-// ErrorType provides a coarse category for BoulderErrors.
-// Objects of type ErrorType should never be directly returned by other
-// functions; instead use the methods below to create an appropriate
-// BoulderError wrapping one of these types.
-type ErrorType int
-
-// These numeric constants are used when sending berrors through gRPC.
-const (
- // InternalServer is deprecated. Instead, pass a plain Go error. That will get
- // turned into a probs.InternalServerError by the WFE.
- InternalServer ErrorType = iota
- _
- Malformed
- Unauthorized
- NotFound
- RateLimit
- RejectedIdentifier
- InvalidEmail
- ConnectionFailure
- _ // Reserved, previously WrongAuthorizationState
- CAA
- MissingSCTs
- Duplicate
- OrderNotReady
- DNS
- BadPublicKey
- BadCSR
- AlreadyRevoked
- BadRevocationReason
-)
-
-func (ErrorType) Error() string {
- return "urn:ietf:params:acme:error"
-}
-
-// BoulderError represents internal Boulder errors
-type BoulderError struct {
- Type ErrorType
- Detail string
- SubErrors []SubBoulderError
-
- // RetryAfter the duration a client should wait before retrying the request
- // which resulted in this error.
- RetryAfter time.Duration
-}
-
-// SubBoulderError represents sub-errors specific to an identifier that are
-// related to a top-level internal Boulder error.
-type SubBoulderError struct {
- *BoulderError
- Identifier identifier.ACMEIdentifier
-}
-
-func (be *BoulderError) Error() string {
- return be.Detail
-}
-
-func (be *BoulderError) Unwrap() error {
- return be.Type
-}
-
-// WithSubErrors returns a new BoulderError instance created by adding the
-// provided subErrs to the existing BoulderError.
-func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError {
- return &BoulderError{
- Type: be.Type,
- Detail: be.Detail,
- SubErrors: append(be.SubErrors, subErrs...),
- RetryAfter: be.RetryAfter,
- }
-}
-
-// New is a convenience function for creating a new BoulderError
-func New(errType ErrorType, msg string, args ...interface{}) error {
- return &BoulderError{
- Type: errType,
- Detail: fmt.Sprintf(msg, args...),
- }
-}
-
-func InternalServerError(msg string, args ...interface{}) error {
- return New(InternalServer, msg, args...)
-}
-
-func MalformedError(msg string, args ...interface{}) error {
- return New(Malformed, msg, args...)
-}
-
-func UnauthorizedError(msg string, args ...interface{}) error {
- return New(Unauthorized, msg, args...)
-}
-
-func NotFoundError(msg string, args ...interface{}) error {
- return New(NotFound, msg, args...)
-}
-
-func RateLimitError(retryAfter time.Duration, msg string, args ...interface{}) error {
- return &BoulderError{
- Type: RateLimit,
- Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...),
- RetryAfter: retryAfter,
- }
-}
-
-func DuplicateCertificateError(retryAfter time.Duration, msg string, args ...interface{}) error {
- return &BoulderError{
- Type: RateLimit,
- Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/duplicate-certificate-limit/", args...),
- RetryAfter: retryAfter,
- }
-}
-
-func FailedValidationError(retryAfter time.Duration, msg string, args ...interface{}) error {
- return &BoulderError{
- Type: RateLimit,
- Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/failed-validation-limit/", args...),
- RetryAfter: retryAfter,
- }
-}
-
-func RegistrationsPerIPError(retryAfter time.Duration, msg string, args ...interface{}) error {
- return &BoulderError{
- Type: RateLimit,
- Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/too-many-registrations-for-this-ip/", args...),
- RetryAfter: retryAfter,
- }
-}
-
-func RejectedIdentifierError(msg string, args ...interface{}) error {
- return New(RejectedIdentifier, msg, args...)
-}
-
-func InvalidEmailError(msg string, args ...interface{}) error {
- return New(InvalidEmail, msg, args...)
-}
-
-func ConnectionFailureError(msg string, args ...interface{}) error {
- return New(ConnectionFailure, msg, args...)
-}
-
-func CAAError(msg string, args ...interface{}) error {
- return New(CAA, msg, args...)
-}
-
-func MissingSCTsError(msg string, args ...interface{}) error {
- return New(MissingSCTs, msg, args...)
-}
-
-func DuplicateError(msg string, args ...interface{}) error {
- return New(Duplicate, msg, args...)
-}
-
-func OrderNotReadyError(msg string, args ...interface{}) error {
- return New(OrderNotReady, msg, args...)
-}
-
-func DNSError(msg string, args ...interface{}) error {
- return New(DNS, msg, args...)
-}
-
-func BadPublicKeyError(msg string, args ...interface{}) error {
- return New(BadPublicKey, msg, args...)
-}
-
-func BadCSRError(msg string, args ...interface{}) error {
- return New(BadCSR, msg, args...)
-}
-
-func AlreadyRevokedError(msg string, args ...interface{}) error {
- return New(AlreadyRevoked, msg, args...)
-}
-
-func BadRevocationReasonError(reason int64) error {
- return New(BadRevocationReason, "disallowed revocation reason: %d", reason)
-}
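
For context on the deletion above: the vendored berrors package existed so that typed errors could be classified across RPC boundaries with errors.Is, since BoulderError.Unwrap returns its ErrorType. It presumably drops out of the vendor tree because nothing in the remaining vendored boulder subset imports it any more (see the goodkey changes further down); the package itself still lives upstream. A small sketch of the pattern the deleted code provided, with an illustrative caller:

package main

import (
	"errors"
	"fmt"

	berrors "github.com/letsencrypt/boulder/errors"
)

func lookup(serial string) error {
	// New attaches a coarse ErrorType to a formatted detail message.
	return berrors.New(berrors.NotFound, "no certificate found for %q", serial)
}

func main() {
	err := lookup("abc123")
	// BoulderError.Unwrap returns the ErrorType, so callers can match on the
	// category without parsing the detail string.
	if errors.Is(err, berrors.NotFound) {
		fmt.Println("not found:", err)
	}
}
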
diff --git a/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go b/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go
deleted file mode 100644
index 7eadc1f9bf6..00000000000
--- a/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Code generated by "stringer -type=FeatureFlag"; DO NOT EDIT.
-
-package features
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[unused-0]
- _ = x[PrecertificateRevocation-1]
- _ = x[StripDefaultSchemePort-2]
- _ = x[NonCFSSLSigner-3]
- _ = x[StoreIssuerInfo-4]
- _ = x[StreamlineOrderAndAuthzs-5]
- _ = x[V1DisableNewValidations-6]
- _ = x[ExpirationMailerDontLookTwice-7]
- _ = x[OldTLSInbound-8]
- _ = x[OldTLSOutbound-9]
- _ = x[ROCSPStage1-10]
- _ = x[ROCSPStage2-11]
- _ = x[ROCSPStage3-12]
- _ = x[CAAValidationMethods-13]
- _ = x[CAAAccountURI-14]
- _ = x[EnforceMultiVA-15]
- _ = x[MultiVAFullResults-16]
- _ = x[MandatoryPOSTAsGET-17]
- _ = x[AllowV1Registration-18]
- _ = x[StoreRevokerInfo-19]
- _ = x[RestrictRSAKeySizes-20]
- _ = x[FasterNewOrdersRateLimit-21]
- _ = x[ECDSAForAll-22]
- _ = x[ServeRenewalInfo-23]
- _ = x[GetAuthzReadOnly-24]
- _ = x[GetAuthzUseIndex-25]
- _ = x[CheckFailedAuthorizationsFirst-26]
- _ = x[AllowReRevocation-27]
- _ = x[MozRevocationReasons-28]
- _ = x[SHA1CSRs-29]
- _ = x[AllowUnrecognizedFeatures-30]
- _ = x[RejectDuplicateCSRExtensions-31]
- _ = x[ROCSPStage6-32]
- _ = x[ROCSPStage7-33]
-}
-
-const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsExpirationMailerDontLookTwiceOldTLSInboundOldTLSOutboundROCSPStage1ROCSPStage2ROCSPStage3CAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasonsSHA1CSRsAllowUnrecognizedFeaturesRejectDuplicateCSRExtensionsROCSPStage6ROCSPStage7"
-
-var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 157, 170, 184, 195, 206, 217, 237, 250, 264, 282, 300, 319, 335, 354, 378, 389, 405, 421, 437, 467, 484, 504, 512, 537, 565, 576, 587}
-
-func (i FeatureFlag) String() string {
- if i < 0 || i >= FeatureFlag(len(_FeatureFlag_index)-1) {
- return "FeatureFlag(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _FeatureFlag_name[_FeatureFlag_index[i]:_FeatureFlag_index[i+1]]
-}
diff --git a/vendor/github.com/letsencrypt/boulder/features/features.go b/vendor/github.com/letsencrypt/boulder/features/features.go
deleted file mode 100644
index 6db20d7deac..00000000000
--- a/vendor/github.com/letsencrypt/boulder/features/features.go
+++ /dev/null
@@ -1,203 +0,0 @@
-//go:generate stringer -type=FeatureFlag
-
-package features
-
-import (
- "fmt"
- "strings"
- "sync"
-)
-
-type FeatureFlag int
-
-const (
- unused FeatureFlag = iota // unused is used for testing
- // Deprecated features, these can be removed once stripped from production configs
- PrecertificateRevocation
- StripDefaultSchemePort
- NonCFSSLSigner
- StoreIssuerInfo
- StreamlineOrderAndAuthzs
- V1DisableNewValidations
- ExpirationMailerDontLookTwice
- OldTLSInbound
- OldTLSOutbound
- ROCSPStage1
- ROCSPStage2
- ROCSPStage3
-
- // Currently in-use features
- // Check CAA and respect validationmethods parameter.
- CAAValidationMethods
- // Check CAA and respect accounturi parameter.
- CAAAccountURI
- // EnforceMultiVA causes the VA to block on remote VA PerformValidation
- // requests in order to make a valid/invalid decision with the results.
- EnforceMultiVA
- // MultiVAFullResults will cause the main VA to wait for all of the remote VA
- // results, not just the threshold required to make a decision.
- MultiVAFullResults
- // MandatoryPOSTAsGET forbids legacy unauthenticated GET requests for ACME
- // resources.
- MandatoryPOSTAsGET
- // Allow creation of new registrations in ACMEv1.
- AllowV1Registration
- // StoreRevokerInfo enables storage of the revoker and a bool indicating if the row
- // was checked for extant unrevoked certificates in the blockedKeys table.
- StoreRevokerInfo
- // RestrictRSAKeySizes enables restriction of acceptable RSA public key moduli to
- // the common sizes (2048, 3072, and 4096 bits).
- RestrictRSAKeySizes
- // FasterNewOrdersRateLimit enables use of a separate table for counting the
- // new orders rate limit.
- FasterNewOrdersRateLimit
- // ECDSAForAll enables all accounts, regardless of their presence in the CA's
- // ecdsaAllowedAccounts config value, to get issuance from ECDSA issuers.
- ECDSAForAll
- // ServeRenewalInfo exposes the renewalInfo endpoint in the directory and for
- // GET requests. WARNING: This feature is a draft and highly unstable.
- ServeRenewalInfo
- // GetAuthzReadOnly causes the SA to use its read-only database connection
- // (which is generally pointed at a replica rather than the primary db) when
- // querying the authz2 table.
- GetAuthzReadOnly
- // GetAuthzUseIndex causes the SA to add a USE INDEX hint when it
- // queries the authz2 table.
- GetAuthzUseIndex
- // Check the failed authorization limit before doing authz reuse.
- CheckFailedAuthorizationsFirst
- // AllowReRevocation causes the RA to allow the revocation reason of an
- // already-revoked certificate to be updated to `keyCompromise` from any
- // other reason if that compromise is demonstrated by making the second
- // revocation request signed by the certificate keypair.
- AllowReRevocation
- // MozRevocationReasons causes the RA to enforce the following upcoming
- // Mozilla policies regarding revocation:
- // - A subscriber can request that their certificate be revoked with reason
- // keyCompromise, even without demonstrating that compromise at the time.
- // However, the cert's pubkey will not be added to the blocked keys list.
- // - When an applicant other than the original subscriber requests that a
- // certificate be revoked (by demonstrating control over all names in it),
- // the cert will be revoked with reason cessationOfOperation, regardless of
- // what revocation reason they request.
- // - When anyone requests that a certificate be revoked by signing the request
- // with the certificate's keypair, the cert will be revoked with reason
- // keyCompromise, regardless of what revocation reason they request.
- MozRevocationReasons
- // SHA1CSRs controls whether the /acme/finalize endpoint rejects CSRs that
- // are self-signed using SHA1.
- SHA1CSRs
- // AllowUnrecognizedFeatures is internal to the features package: if true,
- // skip error when unrecognized feature flag names are passed.
- AllowUnrecognizedFeatures
- // RejectDuplicateCSRExtensions enables verification that submitted CSRs do
- // not contain duplicate extensions. This behavior will be on by default in
- // go1.19.
- RejectDuplicateCSRExtensions
-
- // ROCSPStage6 disables writing full OCSP Responses to MariaDB during
- // (pre)certificate issuance and during revocation. Because Stage 4 involved
- // disabling ocsp-updater, this means that no ocsp response bytes will be
- // written to the database anymore.
- ROCSPStage6
- // ROCSPStage7 disables generating OCSP responses during issuance and
- // revocation. This affects codepaths in both the RA (revocation) and the CA
- // (precert "birth certificates").
- ROCSPStage7
-)
-
-// List of features and their default value, protected by fMu
-var features = map[FeatureFlag]bool{
- unused: false,
- CAAValidationMethods: false,
- CAAAccountURI: false,
- EnforceMultiVA: false,
- MultiVAFullResults: false,
- MandatoryPOSTAsGET: false,
- AllowV1Registration: true,
- V1DisableNewValidations: false,
- PrecertificateRevocation: false,
- StripDefaultSchemePort: false,
- StoreIssuerInfo: false,
- StoreRevokerInfo: false,
- RestrictRSAKeySizes: false,
- FasterNewOrdersRateLimit: false,
- NonCFSSLSigner: false,
- ECDSAForAll: false,
- StreamlineOrderAndAuthzs: false,
- ServeRenewalInfo: false,
- GetAuthzReadOnly: false,
- GetAuthzUseIndex: false,
- CheckFailedAuthorizationsFirst: false,
- AllowReRevocation: false,
- MozRevocationReasons: false,
- OldTLSOutbound: true,
- OldTLSInbound: true,
- SHA1CSRs: true,
- AllowUnrecognizedFeatures: false,
- ExpirationMailerDontLookTwice: false,
- RejectDuplicateCSRExtensions: false,
- ROCSPStage1: false,
- ROCSPStage2: false,
- ROCSPStage3: false,
- ROCSPStage6: false,
- ROCSPStage7: false,
-}
-
-var fMu = new(sync.RWMutex)
-
-var initial = map[FeatureFlag]bool{}
-
-var nameToFeature = make(map[string]FeatureFlag, len(features))
-
-func init() {
- for f, v := range features {
- nameToFeature[f.String()] = f
- initial[f] = v
- }
-}
-
-// Set accepts a list of features and whether they should
-// be enabled or disabled. In the presence of unrecognized
-// flags, it will return an error or not depending on the
-// value of AllowUnrecognizedFeatures.
-func Set(featureSet map[string]bool) error {
- fMu.Lock()
- defer fMu.Unlock()
- var unknown []string
- for n, v := range featureSet {
- f, present := nameToFeature[n]
- if present {
- features[f] = v
- } else {
- unknown = append(unknown, n)
- }
- }
- if len(unknown) > 0 && !features[AllowUnrecognizedFeatures] {
- return fmt.Errorf("unrecognized feature flag names: %s",
- strings.Join(unknown, ", "))
- }
- return nil
-}
-
-// Enabled returns true if the feature is enabled or false
-// if it isn't; it will panic if passed a feature that it
-// doesn't know.
-func Enabled(n FeatureFlag) bool {
- fMu.RLock()
- defer fMu.RUnlock()
- v, present := features[n]
- if !present {
- panic(fmt.Sprintf("feature '%s' doesn't exist", n.String()))
- }
- return v
-}
-
-// Reset resets the features to their initial state
-func Reset() {
- fMu.Lock()
- defer fMu.Unlock()
- for k, v := range initial {
- features[k] = v
- }
-}
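
The deleted features package exposed a small map-backed flag API (Set, Enabled, Reset). The goodkey change further down stops consulting features.Enabled(features.RestrictRSAKeySizes) and makes the key-size restriction unconditional, which is presumably why the package is no longer vendored. For reference, a sketch of the removed API as it was used:

package main

import (
	"fmt"

	"github.com/letsencrypt/boulder/features"
)

func main() {
	// Set toggles flags by their stringer-generated names; unknown names are
	// an error unless AllowUnrecognizedFeatures is itself enabled.
	if err := features.Set(map[string]bool{"ECDSAForAll": true}); err != nil {
		panic(err)
	}
	fmt.Println(features.Enabled(features.ECDSAForAll)) // true

	// Reset restores every flag to its compiled-in default (false for ECDSAForAll).
	features.Reset()
	fmt.Println(features.Enabled(features.ECDSAForAll)) // false
}
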
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
index fdcfe9a181e..198c09db4ed 100644
--- a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
@@ -9,8 +9,7 @@ import (
"os"
"github.com/letsencrypt/boulder/core"
-
- yaml "gopkg.in/yaml.v3"
+ "github.com/letsencrypt/boulder/strictyaml"
)
// blockedKeys is a type for maintaining a map of SHA256 hashes
@@ -58,7 +57,7 @@ func loadBlockedKeysList(filename string) (*blockedKeys, error) {
BlockedHashes []string `yaml:"blocked"`
BlockedHashesHex []string `yaml:"blockedHashesHex"`
}
- err = yaml.Unmarshal(yamlBytes, &list)
+ err = strictyaml.Unmarshal(yamlBytes, &list)
if err != nil {
return nil, err
}
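
The switch from yaml.Unmarshal to strictyaml.Unmarshal above means unknown keys in the blocked-keys YAML are rejected instead of silently dropped. The wrapper's implementation is not part of this diff; assuming it follows the usual yaml.v3 strict-decoding pattern, it is roughly equivalent to this sketch:

package strictyaml

import (
	"bytes"

	"gopkg.in/yaml.v3"
)

// Unmarshal decodes YAML like yaml.Unmarshal, but fails on input keys that
// have no corresponding field in out.
func Unmarshal(b []byte, out interface{}) error {
	dec := yaml.NewDecoder(bytes.NewReader(b))
	dec.KnownFields(true) // reject unknown keys instead of ignoring them
	// Note: unlike yaml.Unmarshal, Decode returns io.EOF for empty input,
	// which the real wrapper would need to handle deliberately.
	return dec.Decode(out)
}
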
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
index b751c376cd1..087a0181232 100644
--- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
@@ -12,10 +12,6 @@ import (
"sync"
"github.com/letsencrypt/boulder/core"
- berrors "github.com/letsencrypt/boulder/errors"
- "github.com/letsencrypt/boulder/features"
- sapb "github.com/letsencrypt/boulder/sa/proto"
- "google.golang.org/grpc"
"github.com/titanous/rocacheck"
)
@@ -68,10 +64,12 @@ func badKey(msg string, args ...interface{}) error {
return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...))
}
-// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey method to KeyPolicy,
-// rather than storing a full sa.SQLStorageAuthority. This makes testing
+// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey functionality to KeyPolicy,
+// rather than storing a full sa.SQLStorageAuthority. This lets external
+// users avoid importing all of boulder/sa, and makes testing
+// significantly simpler.
-type BlockedKeyCheckFunc func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error)
+// On success, the function returns a boolean which is true if the key is blocked.
+type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error)
// KeyPolicy determines which types of key may be used with various boulder
// operations.
@@ -82,7 +80,7 @@ type KeyPolicy struct {
weakRSAList *WeakRSAKeys
blockedList *blockedKeys
fermatRounds int
- dbCheck BlockedKeyCheckFunc
+ blockedCheck BlockedKeyCheckFunc
}
// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384.
@@ -97,7 +95,7 @@ func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
AllowRSA: true,
AllowECDSANISTP256: true,
AllowECDSANISTP384: true,
- dbCheck: bkc,
+ blockedCheck: bkc,
}
if config.WeakKeyFile != "" {
keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile)
@@ -137,20 +135,20 @@ func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) erro
// that has been administratively blocked.
if policy.blockedList != nil {
if blocked, err := policy.blockedList.blocked(key); err != nil {
- return berrors.InternalServerError("error checking blocklist for key: %v", key)
+ return fmt.Errorf("error checking blocklist for key: %v", key)
} else if blocked {
return badKey("public key is forbidden")
}
}
- if policy.dbCheck != nil {
+ if policy.blockedCheck != nil {
digest, err := core.KeyDigest(key)
if err != nil {
return badKey("%w", err)
}
- exists, err := policy.dbCheck(ctx, &sapb.KeyBlockedRequest{KeyHash: digest[:]})
+ exists, err := policy.blockedCheck(ctx, digest[:])
if err != nil {
return err
- } else if exists.Exists {
+ } else if exists {
return badKey("public key is forbidden")
}
}
@@ -275,6 +273,12 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) {
}
}
+// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
+// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
+// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
+// have a known method to easily compute their private key, such as Debian Weak
+// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
+// common key sizes, so we restrict all issuance to those common key sizes.
var acceptableRSAKeySizes = map[int]bool{
2048: true,
3072: true,
@@ -290,27 +294,12 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) {
return badKey("key is on a known weak RSA key list")
}
- // Baseline Requirements Appendix A
- // Modulus must be >= 2048 bits and <= 4096 bits
modulus := key.N
+
+ // See comment on acceptableRSAKeySizes above.
modulusBitLen := modulus.BitLen()
- if features.Enabled(features.RestrictRSAKeySizes) {
- if !acceptableRSAKeySizes[modulusBitLen] {
- return badKey("key size not supported: %d", modulusBitLen)
- }
- } else {
- const maxKeySize = 4096
- if modulusBitLen < 2048 {
- return badKey("key too small: %d", modulusBitLen)
- }
- if modulusBitLen > maxKeySize {
- return badKey("key too large: %d > %d", modulusBitLen, maxKeySize)
- }
- // Bit lengths that are not a multiple of 8 may cause problems on some
- // client implementations.
- if modulusBitLen%8 != 0 {
- return badKey("key length wasn't a multiple of 8: %d", modulusBitLen)
- }
+ if !acceptableRSAKeySizes[modulusBitLen] {
+ return badKey("key size not supported: %d", modulusBitLen)
}
// Rather than support arbitrary exponents, which significantly increases
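
The BlockedKeyCheckFunc change above, from a gRPC-method signature taking *sapb.KeyBlockedRequest to a plain func(ctx, keyHash) (bool, error), means callers that previously passed an SA client method directly now need a thin adapter. A possible shape, using only the types visible in the removed code; the package and function names are illustrative:

package example

import (
	"context"

	"github.com/letsencrypt/boulder/goodkey"
	sapb "github.com/letsencrypt/boulder/sa/proto"
	"google.golang.org/grpc"
)

// adaptBlockedKeyCheck wraps a function with the old gRPC-flavored signature
// so it satisfies the new goodkey.BlockedKeyCheckFunc.
func adaptBlockedKeyCheck(
	old func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error),
) goodkey.BlockedKeyCheckFunc {
	return func(ctx context.Context, keyHash []byte) (bool, error) {
		resp, err := old(ctx, &sapb.KeyBlockedRequest{KeyHash: keyHash})
		if err != nil {
			return false, err
		}
		return resp.Exists, nil
	}
}
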
diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go
index 3736e8d391e..2cc766237dc 100644
--- a/vendor/github.com/letsencrypt/boulder/probs/probs.go
+++ b/vendor/github.com/letsencrypt/boulder/probs/probs.go
@@ -7,29 +7,33 @@ import (
"github.com/letsencrypt/boulder/identifier"
)
-// Error types that can be used in ACME payloads
const (
+ // Error types that can be used in ACME payloads. These are sorted in the
+ // same order as they are defined in RFC8555 Section 6.7. We do not implement
+ // the `compound`, `externalAccountRequired`, or `userActionRequired` errors,
+ // because we have no path that would return them.
+ AccountDoesNotExistProblem = ProblemType("accountDoesNotExist")
+ AlreadyRevokedProblem = ProblemType("alreadyRevoked")
+ BadCSRProblem = ProblemType("badCSR")
+ BadNonceProblem = ProblemType("badNonce")
+ BadPublicKeyProblem = ProblemType("badPublicKey")
+ BadRevocationReasonProblem = ProblemType("badRevocationReason")
+ BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
+ CAAProblem = ProblemType("caa")
ConnectionProblem = ProblemType("connection")
+ DNSProblem = ProblemType("dns")
+ InvalidContactProblem = ProblemType("invalidContact")
MalformedProblem = ProblemType("malformed")
+ OrderNotReadyProblem = ProblemType("orderNotReady")
+ RateLimitedProblem = ProblemType("rateLimited")
+ RejectedIdentifierProblem = ProblemType("rejectedIdentifier")
ServerInternalProblem = ProblemType("serverInternal")
TLSProblem = ProblemType("tls")
UnauthorizedProblem = ProblemType("unauthorized")
- RateLimitedProblem = ProblemType("rateLimited")
- BadNonceProblem = ProblemType("badNonce")
- InvalidEmailProblem = ProblemType("invalidEmail")
- RejectedIdentifierProblem = ProblemType("rejectedIdentifier")
- AccountDoesNotExistProblem = ProblemType("accountDoesNotExist")
- CAAProblem = ProblemType("caa")
- DNSProblem = ProblemType("dns")
- AlreadyRevokedProblem = ProblemType("alreadyRevoked")
- OrderNotReadyProblem = ProblemType("orderNotReady")
- BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
- BadPublicKeyProblem = ProblemType("badPublicKey")
- BadRevocationReasonProblem = ProblemType("badRevocationReason")
- BadCSRProblem = ProblemType("badCSR")
+ UnsupportedContactProblem = ProblemType("unsupportedContact")
+ UnsupportedIdentifierProblem = ProblemType("unsupportedIdentifier")
- V1ErrorNS = "urn:acme:error:"
- V2ErrorNS = "urn:ietf:params:acme:error:"
+ ErrorNS = "urn:ietf:params:acme:error:"
)
// ProblemType defines the error types in the ACME protocol
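
The reordered constants above pair with the constructor helpers rewritten in the following hunks: each helper stamps the matching ProblemType and HTTP status and formats the detail. A usage sketch (the argument values are illustrative; only probs.BadCSR, BadCSRProblem, and ErrorNS come from this file):

package main

import (
	"fmt"
	"net/http"

	"github.com/letsencrypt/boulder/probs"
)

func main() {
	prob := probs.BadCSR("CSR contained %d names, maximum is %d", 150, 100)
	fmt.Println(prob.Type == probs.BadCSRProblem)          // true
	fmt.Println(prob.HTTPStatus == http.StatusBadRequest)  // true
	// The full RFC 8555 error URN is the namespace plus the type:
	fmt.Println(probs.ErrorNS + string(prob.Type)) // urn:ietf:params:acme:error:badCSR
}
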
@@ -71,112 +75,71 @@ func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *Problem
}
}
-// statusTooManyRequests is the HTTP status code meant for rate limiting
-// errors. It's not currently in the net/http library so we add it here.
-const statusTooManyRequests = 429
+// Helper functions which construct the basic RFC8555 Problem Documents, with
+// the Type already set and the Details supplied by the caller.
-// ProblemDetailsToStatusCode inspects the given ProblemDetails to figure out
-// what HTTP status code it should represent. It should only be used by the WFE
-// but is included in this package because of its reliance on ProblemTypes.
-func ProblemDetailsToStatusCode(prob *ProblemDetails) int {
- if prob.HTTPStatus != 0 {
- return prob.HTTPStatus
- }
- switch prob.Type {
- case
- ConnectionProblem,
- MalformedProblem,
- BadSignatureAlgorithmProblem,
- BadPublicKeyProblem,
- TLSProblem,
- BadNonceProblem,
- InvalidEmailProblem,
- RejectedIdentifierProblem,
- AccountDoesNotExistProblem,
- BadRevocationReasonProblem:
- return http.StatusBadRequest
- case ServerInternalProblem:
- return http.StatusInternalServerError
- case
- UnauthorizedProblem,
- CAAProblem:
- return http.StatusForbidden
- case RateLimitedProblem:
- return statusTooManyRequests
- default:
- return http.StatusInternalServerError
- }
-}
-
-// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad
-// Request status code.
-func BadNonce(detail string) *ProblemDetails {
+// AccountDoesNotExist returns a ProblemDetails representing an
+// AccountDoesNotExistProblem error
+func AccountDoesNotExist(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: BadNonceProblem,
+ Type: AccountDoesNotExistProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
-// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
+// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad
// Request status code.
-func RejectedIdentifier(detail string) *ProblemDetails {
+func AlreadyRevoked(detail string, a ...any) *ProblemDetails {
return &ProblemDetails{
- Type: RejectedIdentifierProblem,
- Detail: detail,
+ Type: AlreadyRevokedProblem,
+ Detail: fmt.Sprintf(detail, a...),
HTTPStatus: http.StatusBadRequest,
}
}
-// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict
-// status code.
-func Conflict(detail string) *ProblemDetails {
+// BadCSR returns a ProblemDetails representing a BadCSRProblem.
+func BadCSR(detail string, a ...any) *ProblemDetails {
return &ProblemDetails{
- Type: MalformedProblem,
- Detail: detail,
- HTTPStatus: http.StatusConflict,
+ Type: BadCSRProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
}
}
-// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad
+// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad
// Request status code.
-func AlreadyRevoked(detail string, a ...interface{}) *ProblemDetails {
+func BadNonce(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: AlreadyRevokedProblem,
- Detail: fmt.Sprintf(detail, a...),
+ Type: BadNonceProblem,
+ Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
-// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
+// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad
// Request status code.
-func Malformed(detail string, args ...interface{}) *ProblemDetails {
- if len(args) > 0 {
- detail = fmt.Sprintf(detail, args...)
- }
+func BadPublicKey(detail string, a ...any) *ProblemDetails {
return &ProblemDetails{
- Type: MalformedProblem,
- Detail: detail,
+ Type: BadPublicKeyProblem,
+ Detail: fmt.Sprintf(detail, a...),
HTTPStatus: http.StatusBadRequest,
}
}
-// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
-// Timeout status code.
-func Canceled(detail string, args ...interface{}) *ProblemDetails {
- if len(args) > 0 {
- detail = fmt.Sprintf(detail, args...)
- }
+// BadRevocationReason returns a ProblemDetails representing
+// a BadRevocationReasonProblem
+func BadRevocationReason(detail string, a ...any) *ProblemDetails {
return &ProblemDetails{
- Type: MalformedProblem,
- Detail: detail,
- HTTPStatus: http.StatusRequestTimeout,
+ Type: BadRevocationReasonProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusBadRequest,
}
}
// BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem
// and a 400 Bad Request status code.
-func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails {
+func BadSignatureAlgorithm(detail string, a ...any) *ProblemDetails {
return &ProblemDetails{
Type: BadSignatureAlgorithmProblem,
Detail: fmt.Sprintf(detail, a...),
@@ -184,166 +147,195 @@ func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails {
}
}
-// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad
-// Request status code.
-func BadPublicKey(detail string, a ...interface{}) *ProblemDetails {
+// CAA returns a ProblemDetails representing a CAAProblem
+func CAA(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: BadPublicKeyProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusBadRequest,
+ Type: CAAProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusForbidden,
}
}
-// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
-// status code.
-func NotFound(detail string) *ProblemDetails {
+// Connection returns a ProblemDetails representing a ConnectionProblem
+// error
+func Connection(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: MalformedProblem,
+ Type: ConnectionProblem,
Detail: detail,
- HTTPStatus: http.StatusNotFound,
+ HTTPStatus: http.StatusBadRequest,
}
}
-// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a
-// 500 Internal Server Failure status code.
-func ServerInternal(detail string) *ProblemDetails {
+// DNS returns a ProblemDetails representing a DNSProblem
+func DNS(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: ServerInternalProblem,
+ Type: DNSProblem,
Detail: detail,
- HTTPStatus: http.StatusInternalServerError,
+ HTTPStatus: http.StatusBadRequest,
}
}
-// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403
-// Forbidden status code.
-func Unauthorized(detail string) *ProblemDetails {
+// InvalidContact returns a ProblemDetails representing an InvalidContactProblem.
+func InvalidContact(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: UnauthorizedProblem,
+ Type: InvalidContactProblem,
Detail: detail,
- HTTPStatus: http.StatusForbidden,
+ HTTPStatus: http.StatusBadRequest,
}
}
-// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
-// method error.
-func MethodNotAllowed() *ProblemDetails {
+// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
+// Request status code.
+func Malformed(detail string, a ...any) *ProblemDetails {
+ if len(a) > 0 {
+ detail = fmt.Sprintf(detail, a...)
+ }
return &ProblemDetails{
Type: MalformedProblem,
- Detail: "Method not allowed",
- HTTPStatus: http.StatusMethodNotAllowed,
+ Detail: detail,
+ HTTPStatus: http.StatusBadRequest,
}
}
-// ContentLengthRequired returns a ProblemDetails representing a missing
-// Content-Length header error
-func ContentLengthRequired() *ProblemDetails {
+// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem
+func OrderNotReady(detail string, a ...any) *ProblemDetails {
return &ProblemDetails{
- Type: MalformedProblem,
- Detail: "missing Content-Length header",
- HTTPStatus: http.StatusLengthRequired,
+ Type: OrderNotReadyProblem,
+ Detail: fmt.Sprintf(detail, a...),
+ HTTPStatus: http.StatusForbidden,
}
}
-// InvalidContentType returns a ProblemDetails suitable for a missing
-// ContentType header, or an incorrect ContentType header
-func InvalidContentType(detail string) *ProblemDetails {
+// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
+func RateLimited(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: MalformedProblem,
+ Type: RateLimitedProblem,
Detail: detail,
- HTTPStatus: http.StatusUnsupportedMediaType,
+ HTTPStatus: http.StatusTooManyRequests,
}
}
-// InvalidEmail returns a ProblemDetails representing an invalid email address
-// error
-func InvalidEmail(detail string) *ProblemDetails {
+// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
+// Request status code.
+func RejectedIdentifier(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: InvalidEmailProblem,
+ Type: RejectedIdentifierProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
-// ConnectionFailure returns a ProblemDetails representing a ConnectionProblem
-// error
-func ConnectionFailure(detail string) *ProblemDetails {
+// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a
+// 500 Internal Server Failure status code.
+func ServerInternal(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: ConnectionProblem,
+ Type: ServerInternalProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusInternalServerError,
+ }
+}
+
+// TLS returns a ProblemDetails representing a TLSProblem error
+func TLS(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: TLSProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
-// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
-func RateLimited(detail string) *ProblemDetails {
+// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403
+// Forbidden status code.
+func Unauthorized(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: RateLimitedProblem,
+ Type: UnauthorizedProblem,
Detail: detail,
- HTTPStatus: statusTooManyRequests,
+ HTTPStatus: http.StatusForbidden,
}
}
-// TLSError returns a ProblemDetails representing a TLSProblem error
-func TLSError(detail string) *ProblemDetails {
+// UnsupportedContact returns a ProblemDetails representing an
+// UnsupportedContactProblem
+func UnsupportedContact(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: TLSProblem,
+ Type: UnsupportedContactProblem,
Detail: detail,
HTTPStatus: http.StatusBadRequest,
}
}
-// AccountDoesNotExist returns a ProblemDetails representing an
-// AccountDoesNotExistProblem error
-func AccountDoesNotExist(detail string) *ProblemDetails {
+// UnsupportedIdentifier returns a ProblemDetails representing an
+// UnsupportedIdentifierProblem
+func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails {
return &ProblemDetails{
- Type: AccountDoesNotExistProblem,
- Detail: detail,
+ Type: UnsupportedIdentifierProblem,
+ Detail: fmt.Sprintf(detail, a...),
HTTPStatus: http.StatusBadRequest,
}
}
-// CAA returns a ProblemDetails representing a CAAProblem
-func CAA(detail string) *ProblemDetails {
+// Additional helper functions that return variations on MalformedProblem with
+// different HTTP status codes set.
+
+// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
+// Timeout status code.
+func Canceled(detail string, a ...any) *ProblemDetails {
+ if len(a) > 0 {
+ detail = fmt.Sprintf(detail, a...)
+ }
return &ProblemDetails{
- Type: CAAProblem,
+ Type: MalformedProblem,
Detail: detail,
- HTTPStatus: http.StatusForbidden,
+ HTTPStatus: http.StatusRequestTimeout,
}
}
-// DNS returns a ProblemDetails representing a DNSProblem
-func DNS(detail string) *ProblemDetails {
+// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict
+// status code.
+func Conflict(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: DNSProblem,
+ Type: MalformedProblem,
Detail: detail,
- HTTPStatus: http.StatusBadRequest,
+ HTTPStatus: http.StatusConflict,
}
}
-// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem
-func OrderNotReady(detail string, a ...interface{}) *ProblemDetails {
+// ContentLengthRequired returns a ProblemDetails representing a missing
+// Content-Length header error
+func ContentLengthRequired() *ProblemDetails {
return &ProblemDetails{
- Type: OrderNotReadyProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusForbidden,
+ Type: MalformedProblem,
+ Detail: "missing Content-Length header",
+ HTTPStatus: http.StatusLengthRequired,
}
}
-// BadRevocationReason returns a ProblemDetails representing
-// a BadRevocationReasonProblem
-func BadRevocationReason(detail string, a ...interface{}) *ProblemDetails {
+// InvalidContentType returns a ProblemDetails suitable for a missing
+// ContentType header, or an incorrect ContentType header
+func InvalidContentType(detail string) *ProblemDetails {
return &ProblemDetails{
- Type: BadRevocationReasonProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusBadRequest,
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusUnsupportedMediaType,
}
}
-// BadCSR returns a ProblemDetails representing a BadCSRProblem.
-func BadCSR(detail string, a ...interface{}) *ProblemDetails {
+// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
+// method error.
+func MethodNotAllowed() *ProblemDetails {
return &ProblemDetails{
- Type: BadCSRProblem,
- Detail: fmt.Sprintf(detail, a...),
- HTTPStatus: http.StatusBadRequest,
+ Type: MalformedProblem,
+ Detail: "Method not allowed",
+ HTTPStatus: http.StatusMethodNotAllowed,
+ }
+}
+
+// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
+// status code.
+func NotFound(detail string) *ProblemDetails {
+ return &ProblemDetails{
+ Type: MalformedProblem,
+ Detail: detail,
+ HTTPStatus: http.StatusNotFound,
}
}
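
(Illustrative sketch, not part of this diff: the hunks above reorganize boulder's problem-document helpers so that every constructor, e.g. InvalidContact or Malformed, now returns an RFC 8555 ProblemDetails with its HTTPStatus already set, and the old ProblemDetailsToStatusCode mapping is gone. The handler below, its route, and the 255-byte limit are assumptions made up for illustration only; the probs calls and the ProblemDetails fields match the vendored code above, assuming the usual github.com/letsencrypt/boulder/probs import path.)

package main

import (
	"fmt"
	"net/http"

	"github.com/letsencrypt/boulder/probs"
)

// validateContact is a hypothetical check that returns an RFC 8555 problem
// document on failure. Each helper carries its own HTTP status, so the caller
// no longer needs a separate ProblemType-to-status-code mapping.
func validateContact(contact string) *probs.ProblemDetails {
	if contact == "" {
		return probs.InvalidContact("contact must not be empty")
	}
	if len(contact) > 255 {
		// Malformed formats its detail when extra arguments are supplied.
		return probs.Malformed("contact %q exceeds 255 bytes", contact)
	}
	return nil
}

func handler(w http.ResponseWriter, r *http.Request) {
	if prob := validateContact(r.FormValue("contact")); prob != nil {
		// The status code travels with the problem document itself.
		http.Error(w, fmt.Sprintf("%s: %s", prob.Type, prob.Detail), prob.HTTPStatus)
		return
	}
	fmt.Fprintln(w, "ok")
}

func main() {
	http.HandleFunc("/new-account", handler)
	_ = http.ListenAndServe("localhost:8080", nil)
}
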
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go
deleted file mode 100644
index 27c5d18b077..00000000000
--- a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go
+++ /dev/null
@@ -1,4261 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.28.0
-// protoc v3.20.1
-// source: sa.proto
-
-package proto
-
-import (
- proto "github.com/letsencrypt/boulder/core/proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type RegistrationID struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
-}
-
-func (x *RegistrationID) Reset() {
- *x = RegistrationID{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RegistrationID) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RegistrationID) ProtoMessage() {}
-
-func (x *RegistrationID) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RegistrationID.ProtoReflect.Descriptor instead.
-func (*RegistrationID) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *RegistrationID) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-type JSONWebKey struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Jwk []byte `protobuf:"bytes,1,opt,name=jwk,proto3" json:"jwk,omitempty"`
-}
-
-func (x *JSONWebKey) Reset() {
- *x = JSONWebKey{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *JSONWebKey) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*JSONWebKey) ProtoMessage() {}
-
-func (x *JSONWebKey) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use JSONWebKey.ProtoReflect.Descriptor instead.
-func (*JSONWebKey) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *JSONWebKey) GetJwk() []byte {
- if x != nil {
- return x.Jwk
- }
- return nil
-}
-
-type AuthorizationID struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-}
-
-func (x *AuthorizationID) Reset() {
- *x = AuthorizationID{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AuthorizationID) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AuthorizationID) ProtoMessage() {}
-
-func (x *AuthorizationID) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AuthorizationID.ProtoReflect.Descriptor instead.
-func (*AuthorizationID) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *AuthorizationID) GetId() string {
- if x != nil {
- return x.Id
- }
- return ""
-}
-
-type GetPendingAuthorizationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- IdentifierType string `protobuf:"bytes,2,opt,name=identifierType,proto3" json:"identifierType,omitempty"`
- IdentifierValue string `protobuf:"bytes,3,opt,name=identifierValue,proto3" json:"identifierValue,omitempty"`
- // Result must be valid until at least this Unix timestamp (nanos)
- ValidUntil int64 `protobuf:"varint,4,opt,name=validUntil,proto3" json:"validUntil,omitempty"`
-}
-
-func (x *GetPendingAuthorizationRequest) Reset() {
- *x = GetPendingAuthorizationRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetPendingAuthorizationRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetPendingAuthorizationRequest) ProtoMessage() {}
-
-func (x *GetPendingAuthorizationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetPendingAuthorizationRequest.ProtoReflect.Descriptor instead.
-func (*GetPendingAuthorizationRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *GetPendingAuthorizationRequest) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *GetPendingAuthorizationRequest) GetIdentifierType() string {
- if x != nil {
- return x.IdentifierType
- }
- return ""
-}
-
-func (x *GetPendingAuthorizationRequest) GetIdentifierValue() string {
- if x != nil {
- return x.IdentifierValue
- }
- return ""
-}
-
-func (x *GetPendingAuthorizationRequest) GetValidUntil() int64 {
- if x != nil {
- return x.ValidUntil
- }
- return 0
-}
-
-type GetValidAuthorizationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
- Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *GetValidAuthorizationsRequest) Reset() {
- *x = GetValidAuthorizationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetValidAuthorizationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetValidAuthorizationsRequest) ProtoMessage() {}
-
-func (x *GetValidAuthorizationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetValidAuthorizationsRequest.ProtoReflect.Descriptor instead.
-func (*GetValidAuthorizationsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *GetValidAuthorizationsRequest) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *GetValidAuthorizationsRequest) GetDomains() []string {
- if x != nil {
- return x.Domains
- }
- return nil
-}
-
-func (x *GetValidAuthorizationsRequest) GetNow() int64 {
- if x != nil {
- return x.Now
- }
- return 0
-}
-
-type ValidAuthorizations struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Valid []*ValidAuthorizations_MapElement `protobuf:"bytes,1,rep,name=valid,proto3" json:"valid,omitempty"`
-}
-
-func (x *ValidAuthorizations) Reset() {
- *x = ValidAuthorizations{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidAuthorizations) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidAuthorizations) ProtoMessage() {}
-
-func (x *ValidAuthorizations) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidAuthorizations.ProtoReflect.Descriptor instead.
-func (*ValidAuthorizations) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *ValidAuthorizations) GetValid() []*ValidAuthorizations_MapElement {
- if x != nil {
- return x.Valid
- }
- return nil
-}
-
-type Serial struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
-}
-
-func (x *Serial) Reset() {
- *x = Serial{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Serial) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Serial) ProtoMessage() {}
-
-func (x *Serial) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Serial.ProtoReflect.Descriptor instead.
-func (*Serial) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *Serial) GetSerial() string {
- if x != nil {
- return x.Serial
- }
- return ""
-}
-
-type SerialMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
- RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds)
- Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *SerialMetadata) Reset() {
- *x = SerialMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SerialMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SerialMetadata) ProtoMessage() {}
-
-func (x *SerialMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SerialMetadata.ProtoReflect.Descriptor instead.
-func (*SerialMetadata) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *SerialMetadata) GetSerial() string {
- if x != nil {
- return x.Serial
- }
- return ""
-}
-
-func (x *SerialMetadata) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *SerialMetadata) GetCreated() int64 {
- if x != nil {
- return x.Created
- }
- return 0
-}
-
-func (x *SerialMetadata) GetExpires() int64 {
- if x != nil {
- return x.Expires
- }
- return 0
-}
-
-type Range struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Earliest int64 `protobuf:"varint,1,opt,name=earliest,proto3" json:"earliest,omitempty"` // Unix timestamp (nanoseconds)
- Latest int64 `protobuf:"varint,2,opt,name=latest,proto3" json:"latest,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *Range) Reset() {
- *x = Range{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Range) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Range) ProtoMessage() {}
-
-func (x *Range) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Range.ProtoReflect.Descriptor instead.
-func (*Range) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *Range) GetEarliest() int64 {
- if x != nil {
- return x.Earliest
- }
- return 0
-}
-
-func (x *Range) GetLatest() int64 {
- if x != nil {
- return x.Latest
- }
- return 0
-}
-
-type Count struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
-}
-
-func (x *Count) Reset() {
- *x = Count{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Count) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Count) ProtoMessage() {}
-
-func (x *Count) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Count.ProtoReflect.Descriptor instead.
-func (*Count) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *Count) GetCount() int64 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-
-type Timestamps struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Timestamps []int64 `protobuf:"varint,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *Timestamps) Reset() {
- *x = Timestamps{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Timestamps) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Timestamps) ProtoMessage() {}
-
-func (x *Timestamps) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Timestamps.ProtoReflect.Descriptor instead.
-func (*Timestamps) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *Timestamps) GetTimestamps() []int64 {
- if x != nil {
- return x.Timestamps
- }
- return nil
-}
-
-type CountCertificatesByNamesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Range *Range `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"`
- Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"`
-}
-
-func (x *CountCertificatesByNamesRequest) Reset() {
- *x = CountCertificatesByNamesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CountCertificatesByNamesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CountCertificatesByNamesRequest) ProtoMessage() {}
-
-func (x *CountCertificatesByNamesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CountCertificatesByNamesRequest.ProtoReflect.Descriptor instead.
-func (*CountCertificatesByNamesRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *CountCertificatesByNamesRequest) GetRange() *Range {
- if x != nil {
- return x.Range
- }
- return nil
-}
-
-func (x *CountCertificatesByNamesRequest) GetNames() []string {
- if x != nil {
- return x.Names
- }
- return nil
-}
-
-type CountByNames struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Counts map[string]int64 `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
- Earliest *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=earliest,proto3" json:"earliest,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *CountByNames) Reset() {
- *x = CountByNames{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CountByNames) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CountByNames) ProtoMessage() {}
-
-func (x *CountByNames) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CountByNames.ProtoReflect.Descriptor instead.
-func (*CountByNames) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *CountByNames) GetCounts() map[string]int64 {
- if x != nil {
- return x.Counts
- }
- return nil
-}
-
-func (x *CountByNames) GetEarliest() *timestamppb.Timestamp {
- if x != nil {
- return x.Earliest
- }
- return nil
-}
-
-type CountRegistrationsByIPRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Ip []byte `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"`
- Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"`
-}
-
-func (x *CountRegistrationsByIPRequest) Reset() {
- *x = CountRegistrationsByIPRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CountRegistrationsByIPRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CountRegistrationsByIPRequest) ProtoMessage() {}
-
-func (x *CountRegistrationsByIPRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CountRegistrationsByIPRequest.ProtoReflect.Descriptor instead.
-func (*CountRegistrationsByIPRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *CountRegistrationsByIPRequest) GetIp() []byte {
- if x != nil {
- return x.Ip
- }
- return nil
-}
-
-func (x *CountRegistrationsByIPRequest) GetRange() *Range {
- if x != nil {
- return x.Range
- }
- return nil
-}
-
-type CountInvalidAuthorizationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"`
- // Count authorizations that expire in this range.
- Range *Range `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
-}
-
-func (x *CountInvalidAuthorizationsRequest) Reset() {
- *x = CountInvalidAuthorizationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CountInvalidAuthorizationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CountInvalidAuthorizationsRequest) ProtoMessage() {}
-
-func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CountInvalidAuthorizationsRequest.ProtoReflect.Descriptor instead.
-func (*CountInvalidAuthorizationsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *CountInvalidAuthorizationsRequest) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *CountInvalidAuthorizationsRequest) GetHostname() string {
- if x != nil {
- return x.Hostname
- }
- return ""
-}
-
-func (x *CountInvalidAuthorizationsRequest) GetRange() *Range {
- if x != nil {
- return x.Range
- }
- return nil
-}
-
-type CountOrdersRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- AccountID int64 `protobuf:"varint,1,opt,name=accountID,proto3" json:"accountID,omitempty"`
- Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"`
-}
-
-func (x *CountOrdersRequest) Reset() {
- *x = CountOrdersRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CountOrdersRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CountOrdersRequest) ProtoMessage() {}
-
-func (x *CountOrdersRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CountOrdersRequest.ProtoReflect.Descriptor instead.
-func (*CountOrdersRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *CountOrdersRequest) GetAccountID() int64 {
- if x != nil {
- return x.AccountID
- }
- return 0
-}
-
-func (x *CountOrdersRequest) GetRange() *Range {
- if x != nil {
- return x.Range
- }
- return nil
-}
-
-type CountFQDNSetsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Window int64 `protobuf:"varint,1,opt,name=window,proto3" json:"window,omitempty"`
- Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
-}
-
-func (x *CountFQDNSetsRequest) Reset() {
- *x = CountFQDNSetsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CountFQDNSetsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CountFQDNSetsRequest) ProtoMessage() {}
-
-func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CountFQDNSetsRequest.ProtoReflect.Descriptor instead.
-func (*CountFQDNSetsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *CountFQDNSetsRequest) GetWindow() int64 {
- if x != nil {
- return x.Window
- }
- return 0
-}
-
-func (x *CountFQDNSetsRequest) GetDomains() []string {
- if x != nil {
- return x.Domains
- }
- return nil
-}
-
-type FQDNSetExistsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Domains []string `protobuf:"bytes,1,rep,name=domains,proto3" json:"domains,omitempty"`
-}
-
-func (x *FQDNSetExistsRequest) Reset() {
- *x = FQDNSetExistsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FQDNSetExistsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FQDNSetExistsRequest) ProtoMessage() {}
-
-func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FQDNSetExistsRequest.ProtoReflect.Descriptor instead.
-func (*FQDNSetExistsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *FQDNSetExistsRequest) GetDomains() []string {
- if x != nil {
- return x.Domains
- }
- return nil
-}
-
-type PreviousCertificateExistsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"`
- RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"`
-}
-
-func (x *PreviousCertificateExistsRequest) Reset() {
- *x = PreviousCertificateExistsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PreviousCertificateExistsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PreviousCertificateExistsRequest) ProtoMessage() {}
-
-func (x *PreviousCertificateExistsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PreviousCertificateExistsRequest.ProtoReflect.Descriptor instead.
-func (*PreviousCertificateExistsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *PreviousCertificateExistsRequest) GetDomain() string {
- if x != nil {
- return x.Domain
- }
- return ""
-}
-
-func (x *PreviousCertificateExistsRequest) GetRegID() int64 {
- if x != nil {
- return x.RegID
- }
- return 0
-}
-
-type Exists struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"`
-}
-
-func (x *Exists) Reset() {
- *x = Exists{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Exists) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Exists) ProtoMessage() {}
-
-func (x *Exists) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Exists.ProtoReflect.Descriptor instead.
-func (*Exists) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *Exists) GetExists() bool {
- if x != nil {
- return x.Exists
- }
- return false
-}
-
-type AddSerialRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- RegID int64 `protobuf:"varint,1,opt,name=regID,proto3" json:"regID,omitempty"`
- Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"`
- Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds)
- Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *AddSerialRequest) Reset() {
- *x = AddSerialRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddSerialRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddSerialRequest) ProtoMessage() {}
-
-func (x *AddSerialRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddSerialRequest.ProtoReflect.Descriptor instead.
-func (*AddSerialRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *AddSerialRequest) GetRegID() int64 {
- if x != nil {
- return x.RegID
- }
- return 0
-}
-
-func (x *AddSerialRequest) GetSerial() string {
- if x != nil {
- return x.Serial
- }
- return ""
-}
-
-func (x *AddSerialRequest) GetCreated() int64 {
- if x != nil {
- return x.Created
- }
- return 0
-}
-
-func (x *AddSerialRequest) GetExpires() int64 {
- if x != nil {
- return x.Expires
- }
- return 0
-}
-
-type AddCertificateRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Der []byte `protobuf:"bytes,1,opt,name=der,proto3" json:"der,omitempty"`
- RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"`
- // A signed OCSP response for the certificate contained in "der".
- // Note: The certificate status in the OCSP response is assumed to be 0 (good).
- Ocsp []byte `protobuf:"bytes,3,opt,name=ocsp,proto3" json:"ocsp,omitempty"`
- // An issued time. When not present the SA defaults to using
- // the current time. The orphan-finder uses this parameter to add
- // certificates with the correct historic issued date
- Issued int64 `protobuf:"varint,4,opt,name=issued,proto3" json:"issued,omitempty"`
- IssuerID int64 `protobuf:"varint,5,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
-}
-
-func (x *AddCertificateRequest) Reset() {
- *x = AddCertificateRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddCertificateRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddCertificateRequest) ProtoMessage() {}
-
-func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddCertificateRequest.ProtoReflect.Descriptor instead.
-func (*AddCertificateRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{21}
-}
-
-func (x *AddCertificateRequest) GetDer() []byte {
- if x != nil {
- return x.Der
- }
- return nil
-}
-
-func (x *AddCertificateRequest) GetRegID() int64 {
- if x != nil {
- return x.RegID
- }
- return 0
-}
-
-func (x *AddCertificateRequest) GetOcsp() []byte {
- if x != nil {
- return x.Ocsp
- }
- return nil
-}
-
-func (x *AddCertificateRequest) GetIssued() int64 {
- if x != nil {
- return x.Issued
- }
- return 0
-}
-
-func (x *AddCertificateRequest) GetIssuerID() int64 {
- if x != nil {
- return x.IssuerID
- }
- return 0
-}
-
-type AddCertificateResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
-}
-
-func (x *AddCertificateResponse) Reset() {
- *x = AddCertificateResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddCertificateResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddCertificateResponse) ProtoMessage() {}
-
-func (x *AddCertificateResponse) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddCertificateResponse.ProtoReflect.Descriptor instead.
-func (*AddCertificateResponse) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{22}
-}
-
-func (x *AddCertificateResponse) GetDigest() string {
- if x != nil {
- return x.Digest
- }
- return ""
-}
-
-type OrderRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
-}
-
-func (x *OrderRequest) Reset() {
- *x = OrderRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *OrderRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OrderRequest) ProtoMessage() {}
-
-func (x *OrderRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use OrderRequest.ProtoReflect.Descriptor instead.
-func (*OrderRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{23}
-}
-
-func (x *OrderRequest) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-type NewOrderRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
- Names []string `protobuf:"bytes,3,rep,name=names,proto3" json:"names,omitempty"`
- V2Authorizations []int64 `protobuf:"varint,4,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"`
-}
-
-func (x *NewOrderRequest) Reset() {
- *x = NewOrderRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NewOrderRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*NewOrderRequest) ProtoMessage() {}
-
-func (x *NewOrderRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use NewOrderRequest.ProtoReflect.Descriptor instead.
-func (*NewOrderRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{24}
-}
-
-func (x *NewOrderRequest) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *NewOrderRequest) GetExpires() int64 {
- if x != nil {
- return x.Expires
- }
- return 0
-}
-
-func (x *NewOrderRequest) GetNames() []string {
- if x != nil {
- return x.Names
- }
- return nil
-}
-
-func (x *NewOrderRequest) GetV2Authorizations() []int64 {
- if x != nil {
- return x.V2Authorizations
- }
- return nil
-}
-
-type NewOrderAndAuthzsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- NewOrder *NewOrderRequest `protobuf:"bytes,1,opt,name=newOrder,proto3" json:"newOrder,omitempty"`
- NewAuthzs []*proto.Authorization `protobuf:"bytes,2,rep,name=newAuthzs,proto3" json:"newAuthzs,omitempty"`
-}
-
-func (x *NewOrderAndAuthzsRequest) Reset() {
- *x = NewOrderAndAuthzsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NewOrderAndAuthzsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*NewOrderAndAuthzsRequest) ProtoMessage() {}
-
-func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use NewOrderAndAuthzsRequest.ProtoReflect.Descriptor instead.
-func (*NewOrderAndAuthzsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{25}
-}
-
-func (x *NewOrderAndAuthzsRequest) GetNewOrder() *NewOrderRequest {
- if x != nil {
- return x.NewOrder
- }
- return nil
-}
-
-func (x *NewOrderAndAuthzsRequest) GetNewAuthzs() []*proto.Authorization {
- if x != nil {
- return x.NewAuthzs
- }
- return nil
-}
-
-type SetOrderErrorRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- Error *proto.ProblemDetails `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
-}
-
-func (x *SetOrderErrorRequest) Reset() {
- *x = SetOrderErrorRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SetOrderErrorRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SetOrderErrorRequest) ProtoMessage() {}
-
-func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SetOrderErrorRequest.ProtoReflect.Descriptor instead.
-func (*SetOrderErrorRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{26}
-}
-
-func (x *SetOrderErrorRequest) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *SetOrderErrorRequest) GetError() *proto.ProblemDetails {
- if x != nil {
- return x.Error
- }
- return nil
-}
-
-type GetValidOrderAuthorizationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- AcctID int64 `protobuf:"varint,2,opt,name=acctID,proto3" json:"acctID,omitempty"`
-}
-
-func (x *GetValidOrderAuthorizationsRequest) Reset() {
- *x = GetValidOrderAuthorizationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetValidOrderAuthorizationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetValidOrderAuthorizationsRequest) ProtoMessage() {}
-
-func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetValidOrderAuthorizationsRequest.ProtoReflect.Descriptor instead.
-func (*GetValidOrderAuthorizationsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{27}
-}
-
-func (x *GetValidOrderAuthorizationsRequest) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *GetValidOrderAuthorizationsRequest) GetAcctID() int64 {
- if x != nil {
- return x.AcctID
- }
- return 0
-}
-
-type GetOrderForNamesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- AcctID int64 `protobuf:"varint,1,opt,name=acctID,proto3" json:"acctID,omitempty"`
- Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"`
-}
-
-func (x *GetOrderForNamesRequest) Reset() {
- *x = GetOrderForNamesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOrderForNamesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOrderForNamesRequest) ProtoMessage() {}
-
-func (x *GetOrderForNamesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOrderForNamesRequest.ProtoReflect.Descriptor instead.
-func (*GetOrderForNamesRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{28}
-}
-
-func (x *GetOrderForNamesRequest) GetAcctID() int64 {
- if x != nil {
- return x.AcctID
- }
- return 0
-}
-
-func (x *GetOrderForNamesRequest) GetNames() []string {
- if x != nil {
- return x.Names
- }
- return nil
-}
-
-type FinalizeOrderRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- CertificateSerial string `protobuf:"bytes,2,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"`
-}
-
-func (x *FinalizeOrderRequest) Reset() {
- *x = FinalizeOrderRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FinalizeOrderRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FinalizeOrderRequest) ProtoMessage() {}
-
-func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FinalizeOrderRequest.ProtoReflect.Descriptor instead.
-func (*FinalizeOrderRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{29}
-}
-
-func (x *FinalizeOrderRequest) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *FinalizeOrderRequest) GetCertificateSerial() string {
- if x != nil {
- return x.CertificateSerial
- }
- return ""
-}
-
-type GetAuthorizationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
- Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *GetAuthorizationsRequest) Reset() {
- *x = GetAuthorizationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetAuthorizationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetAuthorizationsRequest) ProtoMessage() {}
-
-func (x *GetAuthorizationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetAuthorizationsRequest.ProtoReflect.Descriptor instead.
-func (*GetAuthorizationsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{30}
-}
-
-func (x *GetAuthorizationsRequest) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *GetAuthorizationsRequest) GetDomains() []string {
- if x != nil {
- return x.Domains
- }
- return nil
-}
-
-func (x *GetAuthorizationsRequest) GetNow() int64 {
- if x != nil {
- return x.Now
- }
- return 0
-}
-
-type Authorizations struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Authz []*Authorizations_MapElement `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"`
-}
-
-func (x *Authorizations) Reset() {
- *x = Authorizations{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Authorizations) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Authorizations) ProtoMessage() {}
-
-func (x *Authorizations) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Authorizations.ProtoReflect.Descriptor instead.
-func (*Authorizations) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{31}
-}
-
-func (x *Authorizations) GetAuthz() []*Authorizations_MapElement {
- if x != nil {
- return x.Authz
- }
- return nil
-}
-
-type AddPendingAuthorizationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Authz []*proto.Authorization `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"`
-}
-
-func (x *AddPendingAuthorizationsRequest) Reset() {
- *x = AddPendingAuthorizationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddPendingAuthorizationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddPendingAuthorizationsRequest) ProtoMessage() {}
-
-func (x *AddPendingAuthorizationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddPendingAuthorizationsRequest.ProtoReflect.Descriptor instead.
-func (*AddPendingAuthorizationsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{32}
-}
-
-func (x *AddPendingAuthorizationsRequest) GetAuthz() []*proto.Authorization {
- if x != nil {
- return x.Authz
- }
- return nil
-}
-
-type AuthorizationIDs struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"`
-}
-
-func (x *AuthorizationIDs) Reset() {
- *x = AuthorizationIDs{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AuthorizationIDs) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AuthorizationIDs) ProtoMessage() {}
-
-func (x *AuthorizationIDs) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AuthorizationIDs.ProtoReflect.Descriptor instead.
-func (*AuthorizationIDs) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{33}
-}
-
-func (x *AuthorizationIDs) GetIds() []string {
- if x != nil {
- return x.Ids
- }
- return nil
-}
-
-type AuthorizationID2 struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
-}
-
-func (x *AuthorizationID2) Reset() {
- *x = AuthorizationID2{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AuthorizationID2) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AuthorizationID2) ProtoMessage() {}
-
-func (x *AuthorizationID2) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AuthorizationID2.ProtoReflect.Descriptor instead.
-func (*AuthorizationID2) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{34}
-}
-
-func (x *AuthorizationID2) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-type Authorization2IDs struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Ids []int64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"`
-}
-
-func (x *Authorization2IDs) Reset() {
- *x = Authorization2IDs{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Authorization2IDs) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Authorization2IDs) ProtoMessage() {}
-
-func (x *Authorization2IDs) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Authorization2IDs.ProtoReflect.Descriptor instead.
-func (*Authorization2IDs) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{35}
-}
-
-func (x *Authorization2IDs) GetIds() []int64 {
- if x != nil {
- return x.Ids
- }
- return nil
-}
-
-type RevokeCertificateRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
- Reason int64 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"`
- Date int64 `protobuf:"varint,3,opt,name=date,proto3" json:"date,omitempty"` // Unix timestamp (nanoseconds)
- Backdate int64 `protobuf:"varint,5,opt,name=backdate,proto3" json:"backdate,omitempty"` // Unix timestamp (nanoseconds)
- Response []byte `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"`
- IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"`
-}
-
-func (x *RevokeCertificateRequest) Reset() {
- *x = RevokeCertificateRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RevokeCertificateRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RevokeCertificateRequest) ProtoMessage() {}
-
-func (x *RevokeCertificateRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RevokeCertificateRequest.ProtoReflect.Descriptor instead.
-func (*RevokeCertificateRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{36}
-}
-
-func (x *RevokeCertificateRequest) GetSerial() string {
- if x != nil {
- return x.Serial
- }
- return ""
-}
-
-func (x *RevokeCertificateRequest) GetReason() int64 {
- if x != nil {
- return x.Reason
- }
- return 0
-}
-
-func (x *RevokeCertificateRequest) GetDate() int64 {
- if x != nil {
- return x.Date
- }
- return 0
-}
-
-func (x *RevokeCertificateRequest) GetBackdate() int64 {
- if x != nil {
- return x.Backdate
- }
- return 0
-}
-
-func (x *RevokeCertificateRequest) GetResponse() []byte {
- if x != nil {
- return x.Response
- }
- return nil
-}
-
-func (x *RevokeCertificateRequest) GetIssuerID() int64 {
- if x != nil {
- return x.IssuerID
- }
- return 0
-}
-
-type FinalizeAuthorizationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
- Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds)
- Attempted string `protobuf:"bytes,4,opt,name=attempted,proto3" json:"attempted,omitempty"`
- ValidationRecords []*proto.ValidationRecord `protobuf:"bytes,5,rep,name=validationRecords,proto3" json:"validationRecords,omitempty"`
- ValidationError *proto.ProblemDetails `protobuf:"bytes,6,opt,name=validationError,proto3" json:"validationError,omitempty"`
- AttemptedAt int64 `protobuf:"varint,7,opt,name=attemptedAt,proto3" json:"attemptedAt,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *FinalizeAuthorizationRequest) Reset() {
- *x = FinalizeAuthorizationRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FinalizeAuthorizationRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FinalizeAuthorizationRequest) ProtoMessage() {}
-
-func (x *FinalizeAuthorizationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FinalizeAuthorizationRequest.ProtoReflect.Descriptor instead.
-func (*FinalizeAuthorizationRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{37}
-}
-
-func (x *FinalizeAuthorizationRequest) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *FinalizeAuthorizationRequest) GetStatus() string {
- if x != nil {
- return x.Status
- }
- return ""
-}
-
-func (x *FinalizeAuthorizationRequest) GetExpires() int64 {
- if x != nil {
- return x.Expires
- }
- return 0
-}
-
-func (x *FinalizeAuthorizationRequest) GetAttempted() string {
- if x != nil {
- return x.Attempted
- }
- return ""
-}
-
-func (x *FinalizeAuthorizationRequest) GetValidationRecords() []*proto.ValidationRecord {
- if x != nil {
- return x.ValidationRecords
- }
- return nil
-}
-
-func (x *FinalizeAuthorizationRequest) GetValidationError() *proto.ProblemDetails {
- if x != nil {
- return x.ValidationError
- }
- return nil
-}
-
-func (x *FinalizeAuthorizationRequest) GetAttemptedAt() int64 {
- if x != nil {
- return x.AttemptedAt
- }
- return 0
-}
-
-type AddBlockedKeyRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"`
- Added int64 `protobuf:"varint,2,opt,name=added,proto3" json:"added,omitempty"` // Unix timestamp (nanoseconds)
- Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
- Comment string `protobuf:"bytes,4,opt,name=comment,proto3" json:"comment,omitempty"`
- RevokedBy int64 `protobuf:"varint,5,opt,name=revokedBy,proto3" json:"revokedBy,omitempty"`
-}
-
-func (x *AddBlockedKeyRequest) Reset() {
- *x = AddBlockedKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AddBlockedKeyRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AddBlockedKeyRequest) ProtoMessage() {}
-
-func (x *AddBlockedKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AddBlockedKeyRequest.ProtoReflect.Descriptor instead.
-func (*AddBlockedKeyRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{38}
-}
-
-func (x *AddBlockedKeyRequest) GetKeyHash() []byte {
- if x != nil {
- return x.KeyHash
- }
- return nil
-}
-
-func (x *AddBlockedKeyRequest) GetAdded() int64 {
- if x != nil {
- return x.Added
- }
- return 0
-}
-
-func (x *AddBlockedKeyRequest) GetSource() string {
- if x != nil {
- return x.Source
- }
- return ""
-}
-
-func (x *AddBlockedKeyRequest) GetComment() string {
- if x != nil {
- return x.Comment
- }
- return ""
-}
-
-func (x *AddBlockedKeyRequest) GetRevokedBy() int64 {
- if x != nil {
- return x.RevokedBy
- }
- return 0
-}
-
-type KeyBlockedRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"`
-}
-
-func (x *KeyBlockedRequest) Reset() {
- *x = KeyBlockedRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *KeyBlockedRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*KeyBlockedRequest) ProtoMessage() {}
-
-func (x *KeyBlockedRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use KeyBlockedRequest.ProtoReflect.Descriptor instead.
-func (*KeyBlockedRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{39}
-}
-
-func (x *KeyBlockedRequest) GetKeyHash() []byte {
- if x != nil {
- return x.KeyHash
- }
- return nil
-}
-
-type Incident struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- SerialTable string `protobuf:"bytes,2,opt,name=serialTable,proto3" json:"serialTable,omitempty"`
- Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"`
- RenewBy int64 `protobuf:"varint,4,opt,name=renewBy,proto3" json:"renewBy,omitempty"` // Unix timestamp (nanoseconds)
- Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"`
-}
-
-func (x *Incident) Reset() {
- *x = Incident{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Incident) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Incident) ProtoMessage() {}
-
-func (x *Incident) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Incident.ProtoReflect.Descriptor instead.
-func (*Incident) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{40}
-}
-
-func (x *Incident) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *Incident) GetSerialTable() string {
- if x != nil {
- return x.SerialTable
- }
- return ""
-}
-
-func (x *Incident) GetUrl() string {
- if x != nil {
- return x.Url
- }
- return ""
-}
-
-func (x *Incident) GetRenewBy() int64 {
- if x != nil {
- return x.RenewBy
- }
- return 0
-}
-
-func (x *Incident) GetEnabled() bool {
- if x != nil {
- return x.Enabled
- }
- return false
-}
-
-type Incidents struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Incidents []*Incident `protobuf:"bytes,1,rep,name=incidents,proto3" json:"incidents,omitempty"`
-}
-
-func (x *Incidents) Reset() {
- *x = Incidents{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Incidents) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Incidents) ProtoMessage() {}
-
-func (x *Incidents) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Incidents.ProtoReflect.Descriptor instead.
-func (*Incidents) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{41}
-}
-
-func (x *Incidents) GetIncidents() []*Incident {
- if x != nil {
- return x.Incidents
- }
- return nil
-}
-
-type SerialsForIncidentRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- IncidentTable string `protobuf:"bytes,1,opt,name=incidentTable,proto3" json:"incidentTable,omitempty"`
-}
-
-func (x *SerialsForIncidentRequest) Reset() {
- *x = SerialsForIncidentRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[42]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SerialsForIncidentRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SerialsForIncidentRequest) ProtoMessage() {}
-
-func (x *SerialsForIncidentRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[42]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SerialsForIncidentRequest.ProtoReflect.Descriptor instead.
-func (*SerialsForIncidentRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{42}
-}
-
-func (x *SerialsForIncidentRequest) GetIncidentTable() string {
- if x != nil {
- return x.IncidentTable
- }
- return ""
-}
-
-type IncidentSerial struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"`
- RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"`
- OrderID int64 `protobuf:"varint,3,opt,name=orderID,proto3" json:"orderID,omitempty"`
- LastNoticeSent int64 `protobuf:"varint,4,opt,name=lastNoticeSent,proto3" json:"lastNoticeSent,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *IncidentSerial) Reset() {
- *x = IncidentSerial{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[43]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *IncidentSerial) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*IncidentSerial) ProtoMessage() {}
-
-func (x *IncidentSerial) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[43]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use IncidentSerial.ProtoReflect.Descriptor instead.
-func (*IncidentSerial) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{43}
-}
-
-func (x *IncidentSerial) GetSerial() string {
- if x != nil {
- return x.Serial
- }
- return ""
-}
-
-func (x *IncidentSerial) GetRegistrationID() int64 {
- if x != nil {
- return x.RegistrationID
- }
- return 0
-}
-
-func (x *IncidentSerial) GetOrderID() int64 {
- if x != nil {
- return x.OrderID
- }
- return 0
-}
-
-func (x *IncidentSerial) GetLastNoticeSent() int64 {
- if x != nil {
- return x.LastNoticeSent
- }
- return 0
-}
-
-type GetRevokedCertsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"`
- ExpiresAfter int64 `protobuf:"varint,2,opt,name=expiresAfter,proto3" json:"expiresAfter,omitempty"` // Unix timestamp (nanoseconds), inclusive
- ExpiresBefore int64 `protobuf:"varint,3,opt,name=expiresBefore,proto3" json:"expiresBefore,omitempty"` // Unix timestamp (nanoseconds), exclusive
- RevokedBefore int64 `protobuf:"varint,4,opt,name=revokedBefore,proto3" json:"revokedBefore,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *GetRevokedCertsRequest) Reset() {
- *x = GetRevokedCertsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[44]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetRevokedCertsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetRevokedCertsRequest) ProtoMessage() {}
-
-func (x *GetRevokedCertsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[44]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetRevokedCertsRequest.ProtoReflect.Descriptor instead.
-func (*GetRevokedCertsRequest) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{44}
-}
-
-func (x *GetRevokedCertsRequest) GetIssuerNameID() int64 {
- if x != nil {
- return x.IssuerNameID
- }
- return 0
-}
-
-func (x *GetRevokedCertsRequest) GetExpiresAfter() int64 {
- if x != nil {
- return x.ExpiresAfter
- }
- return 0
-}
-
-func (x *GetRevokedCertsRequest) GetExpiresBefore() int64 {
- if x != nil {
- return x.ExpiresBefore
- }
- return 0
-}
-
-func (x *GetRevokedCertsRequest) GetRevokedBefore() int64 {
- if x != nil {
- return x.RevokedBefore
- }
- return 0
-}
-
-type RevocationStatus struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Status int64 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"`
- RevokedReason int64 `protobuf:"varint,2,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"`
- RevokedDate *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"` // Unix timestamp (nanoseconds)
-}
-
-func (x *RevocationStatus) Reset() {
- *x = RevocationStatus{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[45]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RevocationStatus) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RevocationStatus) ProtoMessage() {}
-
-func (x *RevocationStatus) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[45]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RevocationStatus.ProtoReflect.Descriptor instead.
-func (*RevocationStatus) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{45}
-}
-
-func (x *RevocationStatus) GetStatus() int64 {
- if x != nil {
- return x.Status
- }
- return 0
-}
-
-func (x *RevocationStatus) GetRevokedReason() int64 {
- if x != nil {
- return x.RevokedReason
- }
- return 0
-}
-
-func (x *RevocationStatus) GetRevokedDate() *timestamppb.Timestamp {
- if x != nil {
- return x.RevokedDate
- }
- return nil
-}
-
-type ValidAuthorizations_MapElement struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"`
- Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"`
-}
-
-func (x *ValidAuthorizations_MapElement) Reset() {
- *x = ValidAuthorizations_MapElement{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[46]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidAuthorizations_MapElement) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidAuthorizations_MapElement) ProtoMessage() {}
-
-func (x *ValidAuthorizations_MapElement) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[46]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidAuthorizations_MapElement.ProtoReflect.Descriptor instead.
-func (*ValidAuthorizations_MapElement) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{5, 0}
-}
-
-func (x *ValidAuthorizations_MapElement) GetDomain() string {
- if x != nil {
- return x.Domain
- }
- return ""
-}
-
-func (x *ValidAuthorizations_MapElement) GetAuthz() *proto.Authorization {
- if x != nil {
- return x.Authz
- }
- return nil
-}
-
-type Authorizations_MapElement struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"`
- Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"`
-}
-
-func (x *Authorizations_MapElement) Reset() {
- *x = Authorizations_MapElement{}
- if protoimpl.UnsafeEnabled {
- mi := &file_sa_proto_msgTypes[48]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Authorizations_MapElement) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Authorizations_MapElement) ProtoMessage() {}
-
-func (x *Authorizations_MapElement) ProtoReflect() protoreflect.Message {
- mi := &file_sa_proto_msgTypes[48]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Authorizations_MapElement.ProtoReflect.Descriptor instead.
-func (*Authorizations_MapElement) Descriptor() ([]byte, []int) {
- return file_sa_proto_rawDescGZIP(), []int{31, 0}
-}
-
-func (x *Authorizations_MapElement) GetDomain() string {
- if x != nil {
- return x.Domain
- }
- return ""
-}
-
-func (x *Authorizations_MapElement) GetAuthz() *proto.Authorization {
- if x != nil {
- return x.Authz
- }
- return nil
-}
-
-var File_sa_proto protoreflect.FileDescriptor
-
-var file_sa_proto_rawDesc = []byte{
- 0x0a, 0x08, 0x73, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x73, 0x61, 0x1a, 0x15,
- 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x22, 0x20, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62,
- 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6a, 0x77, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x03, 0x6a, 0x77, 0x6b, 0x22, 0x21, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
- 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0xba, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74,
- 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72,
- 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65,
- 0x72, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x64, 0x65,
- 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x69,
- 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55, 0x6e,
- 0x74, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64,
- 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x73, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69,
- 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e,
- 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18,
- 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6c,
- 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x1a, 0x4f, 0x0a, 0x0a,
- 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f,
- 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61,
- 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
- 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x20, 0x0a,
- 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61,
- 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22,
- 0x84, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65,
- 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07,
- 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65,
- 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0x3b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x1a, 0x0a, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c,
- 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x61, 0x74,
- 0x65, 0x73, 0x74, 0x22, 0x1d, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05,
- 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x22, 0x2c, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73,
- 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x03, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73,
- 0x22, 0x58, 0x0a, 0x1f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72,
- 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x0c, 0x43,
- 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x61,
- 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x2e, 0x43, 0x6f,
- 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x73, 0x12, 0x36, 0x0a, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
- 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x75,
- 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x1d, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67,
- 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x02, 0x69, 0x70, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52,
- 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x21, 0x43, 0x6f, 0x75, 0x6e, 0x74,
- 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e,
- 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65,
- 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67,
- 0x65, 0x22, 0x53, 0x0a, 0x12, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52,
- 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x48, 0x0a, 0x14, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46,
- 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16,
- 0x0a, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06,
- 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73,
- 0x22, 0x30, 0x0a, 0x14, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61,
- 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69,
- 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x20, 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65,
- 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x14,
- 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72,
- 0x65, 0x67, 0x49, 0x44, 0x22, 0x20, 0x0a, 0x06, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x16,
- 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06,
- 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72,
- 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65,
- 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44,
- 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a,
- 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49,
- 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x12,
- 0x0a, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6f, 0x63,
- 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73,
- 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73,
- 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0x30, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x1e, 0x0a, 0x0c, 0x4f, 0x72, 0x64, 0x65,
- 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x95, 0x01, 0x0a, 0x0f, 0x4e, 0x65, 0x77,
- 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e,
- 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x14,
- 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e,
- 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
- 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10,
- 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x22, 0x7e, 0x0a, 0x18, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41,
- 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x08,
- 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
- 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a,
- 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73,
- 0x22, 0x52, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f,
- 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50,
- 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4c, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64,
- 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63,
- 0x63, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74,
- 0x49, 0x44, 0x22, 0x47, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f,
- 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a,
- 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61,
- 0x63, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x54, 0x0a, 0x14, 0x46,
- 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11,
- 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61,
- 0x6c, 0x22, 0x6e, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a,
- 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73,
- 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12,
- 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f,
- 0x77, 0x22, 0x96, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
- 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x1a, 0x4f, 0x0a, 0x0a, 0x4d, 0x61, 0x70,
- 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69,
- 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12,
- 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
- 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x4c, 0x0a, 0x1f, 0x41, 0x64,
- 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a,
- 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63,
- 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x24, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03,
- 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x22,
- 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49,
- 0x44, 0x32, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
- 0x69, 0x64, 0x22, 0x25, 0x0a, 0x11, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x03, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xb2, 0x01, 0x0a, 0x18, 0x52, 0x65,
- 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16,
- 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06,
- 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x61,
- 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x61,
- 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0xa6,
- 0x02, 0x0a, 0x1c, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f,
- 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12,
- 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72,
- 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65,
- 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x12,
- 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63,
- 0x6f, 0x72, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72,
- 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f,
- 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x3e, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
- 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74,
- 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74,
- 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65,
- 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x14, 0x41, 0x64, 0x64, 0x42,
- 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64,
- 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64,
- 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d,
- 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65,
- 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79,
- 0x22, 0x2d, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x22,
- 0x82, 0x01, 0x0a, 0x08, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x0b,
- 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x10,
- 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c,
- 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x42, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x42, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e,
- 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61,
- 0x62, 0x6c, 0x65, 0x64, 0x22, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74,
- 0x73, 0x12, 0x2a, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65,
- 0x6e, 0x74, 0x52, 0x09, 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x41, 0x0a,
- 0x19, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64,
- 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e,
- 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72,
- 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72,
- 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x26, 0x0a,
- 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63,
- 0x65, 0x53, 0x65, 0x6e, 0x74, 0x22, 0xac, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76,
- 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61,
- 0x6d, 0x65, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41,
- 0x66, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x69,
- 0x72, 0x65, 0x73, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x78, 0x70, 0x69,
- 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x0d, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x24,
- 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65,
- 0x66, 0x6f, 0x72, 0x65, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73,
- 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65,
- 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x76, 0x6f, 0x6b,
- 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65,
- 0x64, 0x44, 0x61, 0x74, 0x65, 0x32, 0xb2, 0x0f, 0x0a, 0x18, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e,
- 0x6c, 0x79, 0x12, 0x53, 0x0a, 0x18, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x23,
- 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79,
- 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0d, 0x43, 0x6f, 0x75, 0x6e, 0x74,
- 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f,
- 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12,
- 0x51, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41,
- 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x25,
- 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64,
- 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74,
- 0x22, 0x00, 0x12, 0x32, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72,
- 0x73, 0x12, 0x16, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65,
- 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43,
- 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50,
- 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73,
- 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43,
- 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x16, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52,
- 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50,
- 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73,
- 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00,
- 0x12, 0x4d, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12,
- 0x37, 0x0a, 0x0d, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73,
- 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69,
- 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e,
- 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x1a, 0x46, 0x51, 0x44, 0x4e,
- 0x53, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x46, 0x6f, 0x72,
- 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e,
- 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73,
- 0x22, 0x00, 0x12, 0x40, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
- 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74,
- 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x13, 0x2e,
- 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f,
- 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e,
- 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75,
- 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31,
- 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63,
- 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22,
- 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53,
- 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00,
- 0x12, 0x48, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x78, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x08, 0x47, 0x65,
- 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65,
- 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
- 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x72,
- 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x73, 0x61,
- 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
- 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x65,
- 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x32, 0x12, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x6e, 0x64,
- 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41,
- 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x34,
- 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a,
- 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73,
- 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67,
- 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, 0x6f,
- 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
- 0x00, 0x12, 0x3c, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x4a,
- 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65,
- 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12,
- 0x39, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69,
- 0x61, 0x6c, 0x1a, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x0f, 0x47, 0x65,
- 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x1a, 0x2e,
- 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72,
- 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65,
- 0x2e, 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a,
- 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x12,
- 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64,
- 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12,
- 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74,
- 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
- 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x26, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65,
- 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f,
- 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x12, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65,
- 0x6e, 0x74, 0x73, 0x46, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x0a, 0x2e, 0x73,
- 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x0d, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e,
- 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x4b, 0x65, 0x79,
- 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x4b, 0x65, 0x79,
- 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a,
- 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x19,
- 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x61, 0x2e, 0x50,
- 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a,
- 0x12, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64,
- 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73,
- 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74,
- 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x32, 0xdf, 0x18, 0x0a, 0x10, 0x53,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12,
- 0x53, 0x0a, 0x18, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x73, 0x61,
- 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d,
- 0x65, 0x73, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0d, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44,
- 0x4e, 0x53, 0x65, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74,
- 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x1b,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x25, 0x2e, 0x73, 0x61,
- 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74,
- 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12,
- 0x32, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x16,
- 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e,
- 0x74, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x6e, 0x64,
- 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x32, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e,
- 0x74, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x16, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69,
- 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x12, 0x21, 0x2e,
- 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x4d, 0x0a,
- 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, 0x2e, 0x73,
- 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0d,
- 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x18, 0x2e,
- 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69,
- 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x1a, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x57, 0x69, 0x6e,
- 0x64, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51,
- 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e,
- 0x73, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22, 0x00, 0x12,
- 0x40, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
- 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72,
- 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
- 0x00, 0x12, 0x48, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74,
- 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f,
- 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0e, 0x47,
- 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e,
- 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65,
- 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3d,
- 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69,
- 0x61, 0x6c, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a,
- 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x78, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, 0x72,
- 0x64, 0x65, 0x72, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64,
- 0x65, 0x72, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72,
- 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65,
- 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64,
- 0x65, 0x72, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69,
- 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32,
- 0x12, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67,
- 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x11, 0x47,
- 0x65, 0x74, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63,
- 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22,
- 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
- 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x3c,
- 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e,
- 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65,
- 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x13,
- 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a,
- 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65,
- 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x73, 0x61, 0x2e,
- 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x52,
- 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, 0x11, 0x47, 0x65,
- 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12,
- 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x12, 0x2e, 0x73, 0x61,
- 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22,
- 0x00, 0x12, 0x52, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74,
- 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x21, 0x2e, 0x73,
- 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
- 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69,
- 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x26, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61,
- 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e,
- 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x12, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73,
- 0x46, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53,
- 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x0d, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64,
- 0x65, 0x6e, 0x74, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f,
- 0x63, 0x6b, 0x65, 0x64, 0x12, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f,
- 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61,
- 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x19, 0x50, 0x72, 0x65,
- 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x61, 0x2e, 0x50, 0x72, 0x65, 0x76,
- 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x45,
- 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73,
- 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x12, 0x53, 0x65,
- 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74,
- 0x12, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72,
- 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72,
- 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64,
- 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e,
- 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19,
- 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x73, 0x61, 0x2e, 0x41,
- 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x72,
- 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x73,
- 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22,
- 0x00, 0x12, 0x3b, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x14,
- 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4a,
- 0x0a, 0x18, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e,
- 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32,
- 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x16, 0x44, 0x65,
- 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
- 0x22, 0x00, 0x12, 0x54, 0x0a, 0x16, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75,
- 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x20, 0x2e, 0x73,
- 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
- 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x46, 0x69, 0x6e, 0x61,
- 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46,
- 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a,
- 0x12, 0x4e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x32, 0x12, 0x23, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x65, 0x6e, 0x64,
- 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75,
- 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x22,
- 0x00, 0x12, 0x2e, 0x0a, 0x08, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x13, 0x2e,
- 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22,
- 0x00, 0x12, 0x40, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64,
- 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f,
- 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65,
- 0x72, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65,
- 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72,
- 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00,
- 0x12, 0x4b, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b,
- 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a,
- 0x0d, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18,
- 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f,
- 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
- 0x22, 0x00, 0x12, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x50, 0x72,
- 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72,
- 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
- 0x74, 0x79, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65,
- 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72,
- 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x16,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65,
- 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x29, 0x5a, 0x27,
- 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65,
- 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x73,
- 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_sa_proto_rawDescOnce sync.Once
- file_sa_proto_rawDescData = file_sa_proto_rawDesc
-)
-
-func file_sa_proto_rawDescGZIP() []byte {
- file_sa_proto_rawDescOnce.Do(func() {
- file_sa_proto_rawDescData = protoimpl.X.CompressGZIP(file_sa_proto_rawDescData)
- })
- return file_sa_proto_rawDescData
-}
-
-var file_sa_proto_msgTypes = make([]protoimpl.MessageInfo, 49)
-var file_sa_proto_goTypes = []interface{}{
- (*RegistrationID)(nil), // 0: sa.RegistrationID
- (*JSONWebKey)(nil), // 1: sa.JSONWebKey
- (*AuthorizationID)(nil), // 2: sa.AuthorizationID
- (*GetPendingAuthorizationRequest)(nil), // 3: sa.GetPendingAuthorizationRequest
- (*GetValidAuthorizationsRequest)(nil), // 4: sa.GetValidAuthorizationsRequest
- (*ValidAuthorizations)(nil), // 5: sa.ValidAuthorizations
- (*Serial)(nil), // 6: sa.Serial
- (*SerialMetadata)(nil), // 7: sa.SerialMetadata
- (*Range)(nil), // 8: sa.Range
- (*Count)(nil), // 9: sa.Count
- (*Timestamps)(nil), // 10: sa.Timestamps
- (*CountCertificatesByNamesRequest)(nil), // 11: sa.CountCertificatesByNamesRequest
- (*CountByNames)(nil), // 12: sa.CountByNames
- (*CountRegistrationsByIPRequest)(nil), // 13: sa.CountRegistrationsByIPRequest
- (*CountInvalidAuthorizationsRequest)(nil), // 14: sa.CountInvalidAuthorizationsRequest
- (*CountOrdersRequest)(nil), // 15: sa.CountOrdersRequest
- (*CountFQDNSetsRequest)(nil), // 16: sa.CountFQDNSetsRequest
- (*FQDNSetExistsRequest)(nil), // 17: sa.FQDNSetExistsRequest
- (*PreviousCertificateExistsRequest)(nil), // 18: sa.PreviousCertificateExistsRequest
- (*Exists)(nil), // 19: sa.Exists
- (*AddSerialRequest)(nil), // 20: sa.AddSerialRequest
- (*AddCertificateRequest)(nil), // 21: sa.AddCertificateRequest
- (*AddCertificateResponse)(nil), // 22: sa.AddCertificateResponse
- (*OrderRequest)(nil), // 23: sa.OrderRequest
- (*NewOrderRequest)(nil), // 24: sa.NewOrderRequest
- (*NewOrderAndAuthzsRequest)(nil), // 25: sa.NewOrderAndAuthzsRequest
- (*SetOrderErrorRequest)(nil), // 26: sa.SetOrderErrorRequest
- (*GetValidOrderAuthorizationsRequest)(nil), // 27: sa.GetValidOrderAuthorizationsRequest
- (*GetOrderForNamesRequest)(nil), // 28: sa.GetOrderForNamesRequest
- (*FinalizeOrderRequest)(nil), // 29: sa.FinalizeOrderRequest
- (*GetAuthorizationsRequest)(nil), // 30: sa.GetAuthorizationsRequest
- (*Authorizations)(nil), // 31: sa.Authorizations
- (*AddPendingAuthorizationsRequest)(nil), // 32: sa.AddPendingAuthorizationsRequest
- (*AuthorizationIDs)(nil), // 33: sa.AuthorizationIDs
- (*AuthorizationID2)(nil), // 34: sa.AuthorizationID2
- (*Authorization2IDs)(nil), // 35: sa.Authorization2IDs
- (*RevokeCertificateRequest)(nil), // 36: sa.RevokeCertificateRequest
- (*FinalizeAuthorizationRequest)(nil), // 37: sa.FinalizeAuthorizationRequest
- (*AddBlockedKeyRequest)(nil), // 38: sa.AddBlockedKeyRequest
- (*KeyBlockedRequest)(nil), // 39: sa.KeyBlockedRequest
- (*Incident)(nil), // 40: sa.Incident
- (*Incidents)(nil), // 41: sa.Incidents
- (*SerialsForIncidentRequest)(nil), // 42: sa.SerialsForIncidentRequest
- (*IncidentSerial)(nil), // 43: sa.IncidentSerial
- (*GetRevokedCertsRequest)(nil), // 44: sa.GetRevokedCertsRequest
- (*RevocationStatus)(nil), // 45: sa.RevocationStatus
- (*ValidAuthorizations_MapElement)(nil), // 46: sa.ValidAuthorizations.MapElement
- nil, // 47: sa.CountByNames.CountsEntry
- (*Authorizations_MapElement)(nil), // 48: sa.Authorizations.MapElement
- (*timestamppb.Timestamp)(nil), // 49: google.protobuf.Timestamp
- (*proto.Authorization)(nil), // 50: core.Authorization
- (*proto.ProblemDetails)(nil), // 51: core.ProblemDetails
- (*proto.ValidationRecord)(nil), // 52: core.ValidationRecord
- (*emptypb.Empty)(nil), // 53: google.protobuf.Empty
- (*proto.Registration)(nil), // 54: core.Registration
- (*proto.Certificate)(nil), // 55: core.Certificate
- (*proto.CertificateStatus)(nil), // 56: core.CertificateStatus
- (*proto.Order)(nil), // 57: core.Order
- (*proto.CRLEntry)(nil), // 58: core.CRLEntry
-}
-var file_sa_proto_depIdxs = []int32{
- 46, // 0: sa.ValidAuthorizations.valid:type_name -> sa.ValidAuthorizations.MapElement
- 8, // 1: sa.CountCertificatesByNamesRequest.range:type_name -> sa.Range
- 47, // 2: sa.CountByNames.counts:type_name -> sa.CountByNames.CountsEntry
- 49, // 3: sa.CountByNames.earliest:type_name -> google.protobuf.Timestamp
- 8, // 4: sa.CountRegistrationsByIPRequest.range:type_name -> sa.Range
- 8, // 5: sa.CountInvalidAuthorizationsRequest.range:type_name -> sa.Range
- 8, // 6: sa.CountOrdersRequest.range:type_name -> sa.Range
- 24, // 7: sa.NewOrderAndAuthzsRequest.newOrder:type_name -> sa.NewOrderRequest
- 50, // 8: sa.NewOrderAndAuthzsRequest.newAuthzs:type_name -> core.Authorization
- 51, // 9: sa.SetOrderErrorRequest.error:type_name -> core.ProblemDetails
- 48, // 10: sa.Authorizations.authz:type_name -> sa.Authorizations.MapElement
- 50, // 11: sa.AddPendingAuthorizationsRequest.authz:type_name -> core.Authorization
- 52, // 12: sa.FinalizeAuthorizationRequest.validationRecords:type_name -> core.ValidationRecord
- 51, // 13: sa.FinalizeAuthorizationRequest.validationError:type_name -> core.ProblemDetails
- 40, // 14: sa.Incidents.incidents:type_name -> sa.Incident
- 49, // 15: sa.RevocationStatus.revokedDate:type_name -> google.protobuf.Timestamp
- 50, // 16: sa.ValidAuthorizations.MapElement.authz:type_name -> core.Authorization
- 50, // 17: sa.Authorizations.MapElement.authz:type_name -> core.Authorization
- 11, // 18: sa.StorageAuthorityReadOnly.CountCertificatesByNames:input_type -> sa.CountCertificatesByNamesRequest
- 16, // 19: sa.StorageAuthorityReadOnly.CountFQDNSets:input_type -> sa.CountFQDNSetsRequest
- 14, // 20: sa.StorageAuthorityReadOnly.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest
- 15, // 21: sa.StorageAuthorityReadOnly.CountOrders:input_type -> sa.CountOrdersRequest
- 0, // 22: sa.StorageAuthorityReadOnly.CountPendingAuthorizations2:input_type -> sa.RegistrationID
- 13, // 23: sa.StorageAuthorityReadOnly.CountRegistrationsByIP:input_type -> sa.CountRegistrationsByIPRequest
- 13, // 24: sa.StorageAuthorityReadOnly.CountRegistrationsByIPRange:input_type -> sa.CountRegistrationsByIPRequest
- 17, // 25: sa.StorageAuthorityReadOnly.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest
- 16, // 26: sa.StorageAuthorityReadOnly.FQDNSetTimestampsForWindow:input_type -> sa.CountFQDNSetsRequest
- 34, // 27: sa.StorageAuthorityReadOnly.GetAuthorization2:input_type -> sa.AuthorizationID2
- 30, // 28: sa.StorageAuthorityReadOnly.GetAuthorizations2:input_type -> sa.GetAuthorizationsRequest
- 6, // 29: sa.StorageAuthorityReadOnly.GetCertificate:input_type -> sa.Serial
- 6, // 30: sa.StorageAuthorityReadOnly.GetCertificateStatus:input_type -> sa.Serial
- 53, // 31: sa.StorageAuthorityReadOnly.GetMaxExpiration:input_type -> google.protobuf.Empty
- 23, // 32: sa.StorageAuthorityReadOnly.GetOrder:input_type -> sa.OrderRequest
- 28, // 33: sa.StorageAuthorityReadOnly.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest
- 3, // 34: sa.StorageAuthorityReadOnly.GetPendingAuthorization2:input_type -> sa.GetPendingAuthorizationRequest
- 6, // 35: sa.StorageAuthorityReadOnly.GetPrecertificate:input_type -> sa.Serial
- 0, // 36: sa.StorageAuthorityReadOnly.GetRegistration:input_type -> sa.RegistrationID
- 1, // 37: sa.StorageAuthorityReadOnly.GetRegistrationByKey:input_type -> sa.JSONWebKey
- 6, // 38: sa.StorageAuthorityReadOnly.GetRevocationStatus:input_type -> sa.Serial
- 44, // 39: sa.StorageAuthorityReadOnly.GetRevokedCerts:input_type -> sa.GetRevokedCertsRequest
- 6, // 40: sa.StorageAuthorityReadOnly.GetSerialMetadata:input_type -> sa.Serial
- 4, // 41: sa.StorageAuthorityReadOnly.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest
- 27, // 42: sa.StorageAuthorityReadOnly.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest
- 6, // 43: sa.StorageAuthorityReadOnly.IncidentsForSerial:input_type -> sa.Serial
- 39, // 44: sa.StorageAuthorityReadOnly.KeyBlocked:input_type -> sa.KeyBlockedRequest
- 18, // 45: sa.StorageAuthorityReadOnly.PreviousCertificateExists:input_type -> sa.PreviousCertificateExistsRequest
- 42, // 46: sa.StorageAuthorityReadOnly.SerialsForIncident:input_type -> sa.SerialsForIncidentRequest
- 11, // 47: sa.StorageAuthority.CountCertificatesByNames:input_type -> sa.CountCertificatesByNamesRequest
- 16, // 48: sa.StorageAuthority.CountFQDNSets:input_type -> sa.CountFQDNSetsRequest
- 14, // 49: sa.StorageAuthority.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest
- 15, // 50: sa.StorageAuthority.CountOrders:input_type -> sa.CountOrdersRequest
- 0, // 51: sa.StorageAuthority.CountPendingAuthorizations2:input_type -> sa.RegistrationID
- 13, // 52: sa.StorageAuthority.CountRegistrationsByIP:input_type -> sa.CountRegistrationsByIPRequest
- 13, // 53: sa.StorageAuthority.CountRegistrationsByIPRange:input_type -> sa.CountRegistrationsByIPRequest
- 17, // 54: sa.StorageAuthority.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest
- 16, // 55: sa.StorageAuthority.FQDNSetTimestampsForWindow:input_type -> sa.CountFQDNSetsRequest
- 34, // 56: sa.StorageAuthority.GetAuthorization2:input_type -> sa.AuthorizationID2
- 30, // 57: sa.StorageAuthority.GetAuthorizations2:input_type -> sa.GetAuthorizationsRequest
- 6, // 58: sa.StorageAuthority.GetCertificate:input_type -> sa.Serial
- 6, // 59: sa.StorageAuthority.GetCertificateStatus:input_type -> sa.Serial
- 53, // 60: sa.StorageAuthority.GetMaxExpiration:input_type -> google.protobuf.Empty
- 23, // 61: sa.StorageAuthority.GetOrder:input_type -> sa.OrderRequest
- 28, // 62: sa.StorageAuthority.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest
- 3, // 63: sa.StorageAuthority.GetPendingAuthorization2:input_type -> sa.GetPendingAuthorizationRequest
- 6, // 64: sa.StorageAuthority.GetPrecertificate:input_type -> sa.Serial
- 0, // 65: sa.StorageAuthority.GetRegistration:input_type -> sa.RegistrationID
- 1, // 66: sa.StorageAuthority.GetRegistrationByKey:input_type -> sa.JSONWebKey
- 6, // 67: sa.StorageAuthority.GetRevocationStatus:input_type -> sa.Serial
- 44, // 68: sa.StorageAuthority.GetRevokedCerts:input_type -> sa.GetRevokedCertsRequest
- 6, // 69: sa.StorageAuthority.GetSerialMetadata:input_type -> sa.Serial
- 4, // 70: sa.StorageAuthority.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest
- 27, // 71: sa.StorageAuthority.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest
- 6, // 72: sa.StorageAuthority.IncidentsForSerial:input_type -> sa.Serial
- 39, // 73: sa.StorageAuthority.KeyBlocked:input_type -> sa.KeyBlockedRequest
- 18, // 74: sa.StorageAuthority.PreviousCertificateExists:input_type -> sa.PreviousCertificateExistsRequest
- 42, // 75: sa.StorageAuthority.SerialsForIncident:input_type -> sa.SerialsForIncidentRequest
- 38, // 76: sa.StorageAuthority.AddBlockedKey:input_type -> sa.AddBlockedKeyRequest
- 21, // 77: sa.StorageAuthority.AddCertificate:input_type -> sa.AddCertificateRequest
- 21, // 78: sa.StorageAuthority.AddPrecertificate:input_type -> sa.AddCertificateRequest
- 20, // 79: sa.StorageAuthority.AddSerial:input_type -> sa.AddSerialRequest
- 34, // 80: sa.StorageAuthority.DeactivateAuthorization2:input_type -> sa.AuthorizationID2
- 0, // 81: sa.StorageAuthority.DeactivateRegistration:input_type -> sa.RegistrationID
- 37, // 82: sa.StorageAuthority.FinalizeAuthorization2:input_type -> sa.FinalizeAuthorizationRequest
- 29, // 83: sa.StorageAuthority.FinalizeOrder:input_type -> sa.FinalizeOrderRequest
- 32, // 84: sa.StorageAuthority.NewAuthorizations2:input_type -> sa.AddPendingAuthorizationsRequest
- 24, // 85: sa.StorageAuthority.NewOrder:input_type -> sa.NewOrderRequest
- 25, // 86: sa.StorageAuthority.NewOrderAndAuthzs:input_type -> sa.NewOrderAndAuthzsRequest
- 54, // 87: sa.StorageAuthority.NewRegistration:input_type -> core.Registration
- 36, // 88: sa.StorageAuthority.RevokeCertificate:input_type -> sa.RevokeCertificateRequest
- 26, // 89: sa.StorageAuthority.SetOrderError:input_type -> sa.SetOrderErrorRequest
- 23, // 90: sa.StorageAuthority.SetOrderProcessing:input_type -> sa.OrderRequest
- 54, // 91: sa.StorageAuthority.UpdateRegistration:input_type -> core.Registration
- 36, // 92: sa.StorageAuthority.UpdateRevokedCertificate:input_type -> sa.RevokeCertificateRequest
- 12, // 93: sa.StorageAuthorityReadOnly.CountCertificatesByNames:output_type -> sa.CountByNames
- 9, // 94: sa.StorageAuthorityReadOnly.CountFQDNSets:output_type -> sa.Count
- 9, // 95: sa.StorageAuthorityReadOnly.CountInvalidAuthorizations2:output_type -> sa.Count
- 9, // 96: sa.StorageAuthorityReadOnly.CountOrders:output_type -> sa.Count
- 9, // 97: sa.StorageAuthorityReadOnly.CountPendingAuthorizations2:output_type -> sa.Count
- 9, // 98: sa.StorageAuthorityReadOnly.CountRegistrationsByIP:output_type -> sa.Count
- 9, // 99: sa.StorageAuthorityReadOnly.CountRegistrationsByIPRange:output_type -> sa.Count
- 19, // 100: sa.StorageAuthorityReadOnly.FQDNSetExists:output_type -> sa.Exists
- 10, // 101: sa.StorageAuthorityReadOnly.FQDNSetTimestampsForWindow:output_type -> sa.Timestamps
- 50, // 102: sa.StorageAuthorityReadOnly.GetAuthorization2:output_type -> core.Authorization
- 31, // 103: sa.StorageAuthorityReadOnly.GetAuthorizations2:output_type -> sa.Authorizations
- 55, // 104: sa.StorageAuthorityReadOnly.GetCertificate:output_type -> core.Certificate
- 56, // 105: sa.StorageAuthorityReadOnly.GetCertificateStatus:output_type -> core.CertificateStatus
- 49, // 106: sa.StorageAuthorityReadOnly.GetMaxExpiration:output_type -> google.protobuf.Timestamp
- 57, // 107: sa.StorageAuthorityReadOnly.GetOrder:output_type -> core.Order
- 57, // 108: sa.StorageAuthorityReadOnly.GetOrderForNames:output_type -> core.Order
- 50, // 109: sa.StorageAuthorityReadOnly.GetPendingAuthorization2:output_type -> core.Authorization
- 55, // 110: sa.StorageAuthorityReadOnly.GetPrecertificate:output_type -> core.Certificate
- 54, // 111: sa.StorageAuthorityReadOnly.GetRegistration:output_type -> core.Registration
- 54, // 112: sa.StorageAuthorityReadOnly.GetRegistrationByKey:output_type -> core.Registration
- 45, // 113: sa.StorageAuthorityReadOnly.GetRevocationStatus:output_type -> sa.RevocationStatus
- 58, // 114: sa.StorageAuthorityReadOnly.GetRevokedCerts:output_type -> core.CRLEntry
- 7, // 115: sa.StorageAuthorityReadOnly.GetSerialMetadata:output_type -> sa.SerialMetadata
- 31, // 116: sa.StorageAuthorityReadOnly.GetValidAuthorizations2:output_type -> sa.Authorizations
- 31, // 117: sa.StorageAuthorityReadOnly.GetValidOrderAuthorizations2:output_type -> sa.Authorizations
- 41, // 118: sa.StorageAuthorityReadOnly.IncidentsForSerial:output_type -> sa.Incidents
- 19, // 119: sa.StorageAuthorityReadOnly.KeyBlocked:output_type -> sa.Exists
- 19, // 120: sa.StorageAuthorityReadOnly.PreviousCertificateExists:output_type -> sa.Exists
- 43, // 121: sa.StorageAuthorityReadOnly.SerialsForIncident:output_type -> sa.IncidentSerial
- 12, // 122: sa.StorageAuthority.CountCertificatesByNames:output_type -> sa.CountByNames
- 9, // 123: sa.StorageAuthority.CountFQDNSets:output_type -> sa.Count
- 9, // 124: sa.StorageAuthority.CountInvalidAuthorizations2:output_type -> sa.Count
- 9, // 125: sa.StorageAuthority.CountOrders:output_type -> sa.Count
- 9, // 126: sa.StorageAuthority.CountPendingAuthorizations2:output_type -> sa.Count
- 9, // 127: sa.StorageAuthority.CountRegistrationsByIP:output_type -> sa.Count
- 9, // 128: sa.StorageAuthority.CountRegistrationsByIPRange:output_type -> sa.Count
- 19, // 129: sa.StorageAuthority.FQDNSetExists:output_type -> sa.Exists
- 10, // 130: sa.StorageAuthority.FQDNSetTimestampsForWindow:output_type -> sa.Timestamps
- 50, // 131: sa.StorageAuthority.GetAuthorization2:output_type -> core.Authorization
- 31, // 132: sa.StorageAuthority.GetAuthorizations2:output_type -> sa.Authorizations
- 55, // 133: sa.StorageAuthority.GetCertificate:output_type -> core.Certificate
- 56, // 134: sa.StorageAuthority.GetCertificateStatus:output_type -> core.CertificateStatus
- 49, // 135: sa.StorageAuthority.GetMaxExpiration:output_type -> google.protobuf.Timestamp
- 57, // 136: sa.StorageAuthority.GetOrder:output_type -> core.Order
- 57, // 137: sa.StorageAuthority.GetOrderForNames:output_type -> core.Order
- 50, // 138: sa.StorageAuthority.GetPendingAuthorization2:output_type -> core.Authorization
- 55, // 139: sa.StorageAuthority.GetPrecertificate:output_type -> core.Certificate
- 54, // 140: sa.StorageAuthority.GetRegistration:output_type -> core.Registration
- 54, // 141: sa.StorageAuthority.GetRegistrationByKey:output_type -> core.Registration
- 45, // 142: sa.StorageAuthority.GetRevocationStatus:output_type -> sa.RevocationStatus
- 58, // 143: sa.StorageAuthority.GetRevokedCerts:output_type -> core.CRLEntry
- 7, // 144: sa.StorageAuthority.GetSerialMetadata:output_type -> sa.SerialMetadata
- 31, // 145: sa.StorageAuthority.GetValidAuthorizations2:output_type -> sa.Authorizations
- 31, // 146: sa.StorageAuthority.GetValidOrderAuthorizations2:output_type -> sa.Authorizations
- 41, // 147: sa.StorageAuthority.IncidentsForSerial:output_type -> sa.Incidents
- 19, // 148: sa.StorageAuthority.KeyBlocked:output_type -> sa.Exists
- 19, // 149: sa.StorageAuthority.PreviousCertificateExists:output_type -> sa.Exists
- 43, // 150: sa.StorageAuthority.SerialsForIncident:output_type -> sa.IncidentSerial
- 53, // 151: sa.StorageAuthority.AddBlockedKey:output_type -> google.protobuf.Empty
- 22, // 152: sa.StorageAuthority.AddCertificate:output_type -> sa.AddCertificateResponse
- 53, // 153: sa.StorageAuthority.AddPrecertificate:output_type -> google.protobuf.Empty
- 53, // 154: sa.StorageAuthority.AddSerial:output_type -> google.protobuf.Empty
- 53, // 155: sa.StorageAuthority.DeactivateAuthorization2:output_type -> google.protobuf.Empty
- 53, // 156: sa.StorageAuthority.DeactivateRegistration:output_type -> google.protobuf.Empty
- 53, // 157: sa.StorageAuthority.FinalizeAuthorization2:output_type -> google.protobuf.Empty
- 53, // 158: sa.StorageAuthority.FinalizeOrder:output_type -> google.protobuf.Empty
- 35, // 159: sa.StorageAuthority.NewAuthorizations2:output_type -> sa.Authorization2IDs
- 57, // 160: sa.StorageAuthority.NewOrder:output_type -> core.Order
- 57, // 161: sa.StorageAuthority.NewOrderAndAuthzs:output_type -> core.Order
- 54, // 162: sa.StorageAuthority.NewRegistration:output_type -> core.Registration
- 53, // 163: sa.StorageAuthority.RevokeCertificate:output_type -> google.protobuf.Empty
- 53, // 164: sa.StorageAuthority.SetOrderError:output_type -> google.protobuf.Empty
- 53, // 165: sa.StorageAuthority.SetOrderProcessing:output_type -> google.protobuf.Empty
- 53, // 166: sa.StorageAuthority.UpdateRegistration:output_type -> google.protobuf.Empty
- 53, // 167: sa.StorageAuthority.UpdateRevokedCertificate:output_type -> google.protobuf.Empty
- 93, // [93:168] is the sub-list for method output_type
- 18, // [18:93] is the sub-list for method input_type
- 18, // [18:18] is the sub-list for extension type_name
- 18, // [18:18] is the sub-list for extension extendee
- 0, // [0:18] is the sub-list for field type_name
-}
-
-func init() { file_sa_proto_init() }
-func file_sa_proto_init() {
- if File_sa_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_sa_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RegistrationID); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*JSONWebKey); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AuthorizationID); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetPendingAuthorizationRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetValidAuthorizationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidAuthorizations); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Serial); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SerialMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Range); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Count); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Timestamps); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CountCertificatesByNamesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CountByNames); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CountRegistrationsByIPRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CountInvalidAuthorizationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CountOrdersRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CountFQDNSetsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FQDNSetExistsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PreviousCertificateExistsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Exists); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddSerialRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddCertificateRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddCertificateResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OrderRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NewOrderRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NewOrderAndAuthzsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetOrderErrorRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetValidOrderAuthorizationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOrderForNamesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FinalizeOrderRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetAuthorizationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Authorizations); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddPendingAuthorizationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AuthorizationIDs); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AuthorizationID2); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Authorization2IDs); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RevokeCertificateRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FinalizeAuthorizationRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AddBlockedKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KeyBlockedRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Incident); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Incidents); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SerialsForIncidentRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*IncidentSerial); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetRevokedCertsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RevocationStatus); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidAuthorizations_MapElement); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_sa_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Authorizations_MapElement); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_sa_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 49,
- NumExtensions: 0,
- NumServices: 2,
- },
- GoTypes: file_sa_proto_goTypes,
- DependencyIndexes: file_sa_proto_depIdxs,
- MessageInfos: file_sa_proto_msgTypes,
- }.Build()
- File_sa_proto = out.File
- file_sa_proto_rawDesc = nil
- file_sa_proto_goTypes = nil
- file_sa_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto
deleted file mode 100644
index 1e4ad6fb878..00000000000
--- a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto
+++ /dev/null
@@ -1,353 +0,0 @@
-syntax = "proto3";
-
-package sa;
-option go_package = "github.com/letsencrypt/boulder/sa/proto";
-
-import "core/proto/core.proto";
-import "google/protobuf/empty.proto";
-import "google/protobuf/timestamp.proto";
-
-// StorageAuthorityReadOnly exposes only those SA methods which are read-only.
-service StorageAuthorityReadOnly {
- rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {}
- rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {}
- rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {}
- rpc CountOrders(CountOrdersRequest) returns (Count) {}
- rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {}
- rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {}
- rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {}
- rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {}
- rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {}
- rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {}
- rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {}
- rpc GetCertificate(Serial) returns (core.Certificate) {}
- rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {}
- rpc GetMaxExpiration(google.protobuf.Empty) returns (google.protobuf.Timestamp) {}
- rpc GetOrder(OrderRequest) returns (core.Order) {}
- rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {}
- rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {}
- rpc GetPrecertificate(Serial) returns (core.Certificate) {}
- rpc GetRegistration(RegistrationID) returns (core.Registration) {}
- rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {}
- rpc GetRevocationStatus(Serial) returns (RevocationStatus) {}
- rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {}
- rpc GetSerialMetadata(Serial) returns (SerialMetadata) {}
- rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {}
- rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {}
- rpc IncidentsForSerial(Serial) returns (Incidents) {}
- rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {}
- rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {}
- rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {}
-}
-
-// StorageAuthority provides full read/write access to the database.
-service StorageAuthority {
- // Getters: this list must be identical to the StorageAuthorityReadOnly rpcs.
- rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {}
- rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {}
- rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {}
- rpc CountOrders(CountOrdersRequest) returns (Count) {}
- rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {}
- rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {}
- rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {}
- rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {}
- rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {}
- rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {}
- rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {}
- rpc GetCertificate(Serial) returns (core.Certificate) {}
- rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {}
- rpc GetMaxExpiration(google.protobuf.Empty) returns (google.protobuf.Timestamp) {}
- rpc GetOrder(OrderRequest) returns (core.Order) {}
- rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {}
- rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {}
- rpc GetPrecertificate(Serial) returns (core.Certificate) {}
- rpc GetRegistration(RegistrationID) returns (core.Registration) {}
- rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {}
- rpc GetRevocationStatus(Serial) returns (RevocationStatus) {}
- rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {}
- rpc GetSerialMetadata(Serial) returns (SerialMetadata) {}
- rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {}
- rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {}
- rpc IncidentsForSerial(Serial) returns (Incidents) {}
- rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {}
- rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {}
- rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {}
- // Adders
- rpc AddBlockedKey(AddBlockedKeyRequest) returns (google.protobuf.Empty) {}
- rpc AddCertificate(AddCertificateRequest) returns (AddCertificateResponse) {}
- rpc AddPrecertificate(AddCertificateRequest) returns (google.protobuf.Empty) {}
- rpc AddSerial(AddSerialRequest) returns (google.protobuf.Empty) {}
- rpc DeactivateAuthorization2(AuthorizationID2) returns (google.protobuf.Empty) {}
- rpc DeactivateRegistration(RegistrationID) returns (google.protobuf.Empty) {}
- rpc FinalizeAuthorization2(FinalizeAuthorizationRequest) returns (google.protobuf.Empty) {}
- rpc FinalizeOrder(FinalizeOrderRequest) returns (google.protobuf.Empty) {}
- rpc NewAuthorizations2(AddPendingAuthorizationsRequest) returns (Authorization2IDs) {}
- rpc NewOrder(NewOrderRequest) returns (core.Order) {}
- rpc NewOrderAndAuthzs(NewOrderAndAuthzsRequest) returns (core.Order) {}
- rpc NewRegistration(core.Registration) returns (core.Registration) {}
- rpc RevokeCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
- rpc SetOrderError(SetOrderErrorRequest) returns (google.protobuf.Empty) {}
- rpc SetOrderProcessing(OrderRequest) returns (google.protobuf.Empty) {}
- rpc UpdateRegistration(core.Registration) returns (google.protobuf.Empty) {}
- rpc UpdateRevokedCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
-}
-
-message RegistrationID {
- int64 id = 1;
-}
-
-message JSONWebKey {
- bytes jwk = 1;
-}
-
-message AuthorizationID {
- string id = 1;
-}
-
-message GetPendingAuthorizationRequest {
- int64 registrationID = 1;
- string identifierType = 2;
- string identifierValue = 3;
- // Result must be valid until at least this Unix timestamp (nanos)
- int64 validUntil = 4;
-}
-
-message GetValidAuthorizationsRequest {
- int64 registrationID = 1;
- repeated string domains = 2;
- int64 now = 3; // Unix timestamp (nanoseconds)
-}
-
-message ValidAuthorizations {
- message MapElement {
- string domain = 1;
- core.Authorization authz = 2;
- }
- repeated MapElement valid = 1;
-}
-
-message Serial {
- string serial = 1;
-}
-
-message SerialMetadata {
- string serial = 1;
- int64 registrationID = 2;
- int64 created = 3; // Unix timestamp (nanoseconds)
- int64 expires = 4; // Unix timestamp (nanoseconds)
-}
-
-message Range {
- int64 earliest = 1; // Unix timestamp (nanoseconds)
- int64 latest = 2; // Unix timestamp (nanoseconds)
-}
-
-message Count {
- int64 count = 1;
-}
-
-message Timestamps {
- repeated int64 timestamps = 1; // Unix timestamp (nanoseconds)
-}
-
-message CountCertificatesByNamesRequest {
- Range range = 1;
- repeated string names = 2;
-}
-
-message CountByNames {
- map<string, int64> counts = 1;
- google.protobuf.Timestamp earliest = 2; // Unix timestamp (nanoseconds)
-}
-
-message CountRegistrationsByIPRequest {
- bytes ip = 1;
- Range range = 2;
-}
-
-message CountInvalidAuthorizationsRequest {
- int64 registrationID = 1;
- string hostname = 2;
- // Count authorizations that expire in this range.
- Range range = 3;
-}
-
-message CountOrdersRequest {
- int64 accountID = 1;
- Range range = 2;
-}
-
-message CountFQDNSetsRequest {
- int64 window = 1;
- repeated string domains = 2;
-}
-
-message FQDNSetExistsRequest {
- repeated string domains = 1;
-}
-
-message PreviousCertificateExistsRequest {
- string domain = 1;
- int64 regID = 2;
-}
-
-message Exists {
- bool exists = 1;
-}
-
-message AddSerialRequest {
- int64 regID = 1;
- string serial = 2;
- int64 created = 3; // Unix timestamp (nanoseconds)
- int64 expires = 4; // Unix timestamp (nanoseconds)
-}
-
-message AddCertificateRequest {
- bytes der = 1;
- int64 regID = 2;
- // A signed OCSP response for the certificate contained in "der".
- // Note: The certificate status in the OCSP response is assumed to be 0 (good).
- bytes ocsp = 3;
- // An issued time. When not present the SA defaults to using
- // the current time. The orphan-finder uses this parameter to add
- // certificates with the correct historic issued date
- int64 issued = 4;
- int64 issuerID = 5;
-}
-
-message AddCertificateResponse {
- string digest = 1;
-}
-
-message OrderRequest {
- int64 id = 1;
-}
-
-message NewOrderRequest {
- int64 registrationID = 1;
- int64 expires = 2;
- repeated string names = 3;
- repeated int64 v2Authorizations = 4;
-}
-
-message NewOrderAndAuthzsRequest {
- NewOrderRequest newOrder = 1;
- repeated core.Authorization newAuthzs = 2;
-}
-
-message SetOrderErrorRequest {
- int64 id = 1;
- core.ProblemDetails error = 2;
-}
-
-message GetValidOrderAuthorizationsRequest {
- int64 id = 1;
- int64 acctID = 2;
-}
-
-message GetOrderForNamesRequest {
- int64 acctID = 1;
- repeated string names = 2;
-}
-
-message FinalizeOrderRequest {
- int64 id = 1;
- string certificateSerial = 2;
-}
-
-message GetAuthorizationsRequest {
- int64 registrationID = 1;
- repeated string domains = 2;
- int64 now = 3; // Unix timestamp (nanoseconds)
-}
-
-message Authorizations {
- message MapElement {
- string domain = 1;
- core.Authorization authz = 2;
- }
- repeated MapElement authz = 1;
-}
-
-message AddPendingAuthorizationsRequest {
- repeated core.Authorization authz = 1;
-}
-
-message AuthorizationIDs {
- repeated string ids = 1;
-}
-
-message AuthorizationID2 {
- int64 id = 1;
-}
-
-message Authorization2IDs {
- repeated int64 ids = 1;
-}
-
-message RevokeCertificateRequest {
- string serial = 1;
- int64 reason = 2;
- int64 date = 3; // Unix timestamp (nanoseconds)
- int64 backdate = 5; // Unix timestamp (nanoseconds)
- bytes response = 4;
- int64 issuerID = 6;
-}
-
-message FinalizeAuthorizationRequest {
- int64 id = 1;
- string status = 2;
- int64 expires = 3; // Unix timestamp (nanoseconds)
- string attempted = 4;
- repeated core.ValidationRecord validationRecords = 5;
- core.ProblemDetails validationError = 6;
- int64 attemptedAt = 7; // Unix timestamp (nanoseconds)
-}
-
-message AddBlockedKeyRequest {
- bytes keyHash = 1;
- int64 added = 2; // Unix timestamp (nanoseconds)
- string source = 3;
- string comment = 4;
- int64 revokedBy = 5;
-}
-
-message KeyBlockedRequest {
- bytes keyHash = 1;
-}
-
-message Incident {
- int64 id = 1;
- string serialTable = 2;
- string url = 3;
- int64 renewBy = 4; // Unix timestamp (nanoseconds)
- bool enabled = 5;
-}
-
-message Incidents {
- repeated Incident incidents = 1;
-}
-
-message SerialsForIncidentRequest {
- string incidentTable = 1;
-}
-
-message IncidentSerial {
- string serial = 1;
- int64 registrationID = 2;
- int64 orderID = 3;
- int64 lastNoticeSent = 4; // Unix timestamp (nanoseconds)
-}
-
-message GetRevokedCertsRequest {
- int64 issuerNameID = 1;
- int64 expiresAfter = 2; // Unix timestamp (nanoseconds), inclusive
- int64 expiresBefore = 3; // Unix timestamp (nanoseconds), exclusive
- int64 revokedBefore = 4; // Unix timestamp (nanoseconds)
-}
-
-message RevocationStatus {
- int64 status = 1;
- int64 revokedReason = 2;
- google.protobuf.Timestamp revokedDate = 3; // Unix timestamp (nanoseconds)
-}
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go
deleted file mode 100644
index 37a4e026a65..00000000000
--- a/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go
+++ /dev/null
@@ -1,2937 +0,0 @@
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.20.1
-// source: sa.proto
-
-package proto
-
-import (
- context "context"
- proto "github.com/letsencrypt/boulder/core/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-// StorageAuthorityReadOnlyClient is the client API for StorageAuthorityReadOnly service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type StorageAuthorityReadOnlyClient interface {
- CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error)
- CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error)
- CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error)
- CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error)
- CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
- CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
- CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
- FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error)
- FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error)
- GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error)
- GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
- GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
- GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error)
- GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error)
- GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
- GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
- GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
- GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
- GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
- GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error)
- GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error)
- GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthorityReadOnly_GetRevokedCertsClient, error)
- GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error)
- GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
- GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
- IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error)
- KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error)
- PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error)
- SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthorityReadOnly_SerialsForIncidentClient, error)
-}
-
-type storageAuthorityReadOnlyClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewStorageAuthorityReadOnlyClient(cc grpc.ClientConnInterface) StorageAuthorityReadOnlyClient {
- return &storageAuthorityReadOnlyClient{cc}
-}
-
-func (c *storageAuthorityReadOnlyClient) CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) {
- out := new(CountByNames)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountCertificatesByNames", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountFQDNSets", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountInvalidAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountOrders", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountPendingAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountRegistrationsByIP", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/CountRegistrationsByIPRange", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) {
- out := new(Exists)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/FQDNSetExists", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) {
- out := new(Timestamps)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/FQDNSetTimestampsForWindow", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) {
- out := new(proto.Authorization)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetAuthorization2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
- out := new(Authorizations)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
- out := new(proto.Certificate)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetCertificate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) {
- out := new(proto.CertificateStatus)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetCertificateStatus", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error) {
- out := new(timestamppb.Timestamp)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetMaxExpiration", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
- out := new(proto.Order)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetOrder", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) {
- out := new(proto.Order)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetOrderForNames", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) {
- out := new(proto.Authorization)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetPendingAuthorization2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
- out := new(proto.Certificate)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetPrecertificate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) {
- out := new(proto.Registration)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetRegistration", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) {
- out := new(proto.Registration)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetRegistrationByKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) {
- out := new(RevocationStatus)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetRevocationStatus", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthorityReadOnly_GetRevokedCertsClient, error) {
- stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[0], "/sa.StorageAuthorityReadOnly/GetRevokedCerts", opts...)
- if err != nil {
- return nil, err
- }
- x := &storageAuthorityReadOnlyGetRevokedCertsClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type StorageAuthorityReadOnly_GetRevokedCertsClient interface {
- Recv() (*proto.CRLEntry, error)
- grpc.ClientStream
-}
-
-type storageAuthorityReadOnlyGetRevokedCertsClient struct {
- grpc.ClientStream
-}
-
-func (x *storageAuthorityReadOnlyGetRevokedCertsClient) Recv() (*proto.CRLEntry, error) {
- m := new(proto.CRLEntry)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) {
- out := new(SerialMetadata)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetSerialMetadata", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
- out := new(Authorizations)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetValidAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
- out := new(Authorizations)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/GetValidOrderAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) {
- out := new(Incidents)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/IncidentsForSerial", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) {
- out := new(Exists)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/KeyBlocked", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) {
- out := new(Exists)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthorityReadOnly/PreviousCertificateExists", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityReadOnlyClient) SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthorityReadOnly_SerialsForIncidentClient, error) {
- stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[1], "/sa.StorageAuthorityReadOnly/SerialsForIncident", opts...)
- if err != nil {
- return nil, err
- }
- x := &storageAuthorityReadOnlySerialsForIncidentClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type StorageAuthorityReadOnly_SerialsForIncidentClient interface {
- Recv() (*IncidentSerial, error)
- grpc.ClientStream
-}
-
-type storageAuthorityReadOnlySerialsForIncidentClient struct {
- grpc.ClientStream
-}
-
-func (x *storageAuthorityReadOnlySerialsForIncidentClient) Recv() (*IncidentSerial, error) {
- m := new(IncidentSerial)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// StorageAuthorityReadOnlyServer is the server API for StorageAuthorityReadOnly service.
-// All implementations must embed UnimplementedStorageAuthorityReadOnlyServer
-// for forward compatibility
-type StorageAuthorityReadOnlyServer interface {
- CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error)
- CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error)
- CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error)
- CountOrders(context.Context, *CountOrdersRequest) (*Count, error)
- CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error)
- CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error)
- CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error)
- FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error)
- FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error)
- GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error)
- GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error)
- GetCertificate(context.Context, *Serial) (*proto.Certificate, error)
- GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error)
- GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error)
- GetOrder(context.Context, *OrderRequest) (*proto.Order, error)
- GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error)
- GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error)
- GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error)
- GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error)
- GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error)
- GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error)
- GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthorityReadOnly_GetRevokedCertsServer) error
- GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error)
- GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error)
- GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error)
- IncidentsForSerial(context.Context, *Serial) (*Incidents, error)
- KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error)
- PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error)
- SerialsForIncident(*SerialsForIncidentRequest, StorageAuthorityReadOnly_SerialsForIncidentServer) error
- mustEmbedUnimplementedStorageAuthorityReadOnlyServer()
-}
-
-// UnimplementedStorageAuthorityReadOnlyServer must be embedded to have forward compatible implementations.
-type UnimplementedStorageAuthorityReadOnlyServer struct {
-}
-
-func (UnimplementedStorageAuthorityReadOnlyServer) CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountCertificatesByNames not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountFQDNSets not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) CountOrders(context.Context, *CountOrdersRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountOrders not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIP not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIPRange not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) {
- return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) {
- return nil, status.Errorf(codes.Unimplemented, "method FQDNSetTimestampsForWindow not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetCertificate(context.Context, *Serial) (*proto.Certificate, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetCertificate not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetCertificateStatus not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetMaxExpiration not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetOrder not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetPendingAuthorization2 not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetPrecertificate not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetRevocationStatus not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthorityReadOnly_GetRevokedCertsServer) error {
- return status.Errorf(codes.Unimplemented, "method GetRevokedCerts not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetValidOrderAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) IncidentsForSerial(context.Context, *Serial) (*Incidents, error) {
- return nil, status.Errorf(codes.Unimplemented, "method IncidentsForSerial not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) {
- return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) {
- return nil, status.Errorf(codes.Unimplemented, "method PreviousCertificateExists not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) SerialsForIncident(*SerialsForIncidentRequest, StorageAuthorityReadOnly_SerialsForIncidentServer) error {
- return status.Errorf(codes.Unimplemented, "method SerialsForIncident not implemented")
-}
-func (UnimplementedStorageAuthorityReadOnlyServer) mustEmbedUnimplementedStorageAuthorityReadOnlyServer() {
-}
-
-// UnsafeStorageAuthorityReadOnlyServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to StorageAuthorityReadOnlyServer will
-// result in compilation errors.
-type UnsafeStorageAuthorityReadOnlyServer interface {
- mustEmbedUnimplementedStorageAuthorityReadOnlyServer()
-}
-
-func RegisterStorageAuthorityReadOnlyServer(s grpc.ServiceRegistrar, srv StorageAuthorityReadOnlyServer) {
- s.RegisterService(&StorageAuthorityReadOnly_ServiceDesc, srv)
-}
-
-func _StorageAuthorityReadOnly_CountCertificatesByNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountCertificatesByNamesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).CountCertificatesByNames(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/CountCertificatesByNames",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).CountCertificatesByNames(ctx, req.(*CountCertificatesByNamesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_CountFQDNSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountFQDNSetsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).CountFQDNSets(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/CountFQDNSets",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).CountFQDNSets(ctx, req.(*CountFQDNSetsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_CountInvalidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountInvalidAuthorizationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).CountInvalidAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/CountInvalidAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).CountInvalidAuthorizations2(ctx, req.(*CountInvalidAuthorizationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_CountOrders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountOrdersRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).CountOrders(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/CountOrders",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).CountOrders(ctx, req.(*CountOrdersRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_CountPendingAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RegistrationID)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).CountPendingAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/CountPendingAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).CountPendingAuthorizations2(ctx, req.(*RegistrationID))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_CountRegistrationsByIP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountRegistrationsByIPRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).CountRegistrationsByIP(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/CountRegistrationsByIP",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).CountRegistrationsByIP(ctx, req.(*CountRegistrationsByIPRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_CountRegistrationsByIPRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountRegistrationsByIPRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).CountRegistrationsByIPRange(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/CountRegistrationsByIPRange",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).CountRegistrationsByIPRange(ctx, req.(*CountRegistrationsByIPRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(FQDNSetExistsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).FQDNSetExists(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/FQDNSetExists",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).FQDNSetExists(ctx, req.(*FQDNSetExistsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountFQDNSetsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).FQDNSetTimestampsForWindow(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/FQDNSetTimestampsForWindow",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).FQDNSetTimestampsForWindow(ctx, req.(*CountFQDNSetsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthorizationID2)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetAuthorization2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetAuthorization2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetAuthorization2(ctx, req.(*AuthorizationID2))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetAuthorizationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetAuthorizations2(ctx, req.(*GetAuthorizationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetCertificate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetCertificate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetCertificate(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetCertificateStatus(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetCertificateStatus",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetCertificateStatus(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetMaxExpiration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(emptypb.Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetMaxExpiration(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetMaxExpiration",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetMaxExpiration(ctx, req.(*emptypb.Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(OrderRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetOrder(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetOrder",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetOrder(ctx, req.(*OrderRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetOrderForNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetOrderForNamesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetOrderForNames(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetOrderForNames",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetOrderForNames(ctx, req.(*GetOrderForNamesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetPendingAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetPendingAuthorizationRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetPendingAuthorization2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetPendingAuthorization2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetPendingAuthorization2(ctx, req.(*GetPendingAuthorizationRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetPrecertificate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetPrecertificate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetPrecertificate(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RegistrationID)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetRegistration(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetRegistration",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetRegistration(ctx, req.(*RegistrationID))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetRegistrationByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(JSONWebKey)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetRegistrationByKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetRegistrationByKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetRegistrationByKey(ctx, req.(*JSONWebKey))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetRevocationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetRevocationStatus(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetRevocationStatus",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetRevocationStatus(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetRevokedCerts_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(GetRevokedCertsRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(StorageAuthorityReadOnlyServer).GetRevokedCerts(m, &storageAuthorityReadOnlyGetRevokedCertsServer{stream})
-}
-
-type StorageAuthorityReadOnly_GetRevokedCertsServer interface {
- Send(*proto.CRLEntry) error
- grpc.ServerStream
-}
-
-type storageAuthorityReadOnlyGetRevokedCertsServer struct {
- grpc.ServerStream
-}
-
-func (x *storageAuthorityReadOnlyGetRevokedCertsServer) Send(m *proto.CRLEntry) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _StorageAuthorityReadOnly_GetSerialMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetSerialMetadata(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetSerialMetadata",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetSerialMetadata(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetValidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetValidAuthorizationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetValidAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetValidAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetValidAuthorizations2(ctx, req.(*GetValidAuthorizationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_GetValidOrderAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetValidOrderAuthorizationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).GetValidOrderAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/GetValidOrderAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).GetValidOrderAuthorizations2(ctx, req.(*GetValidOrderAuthorizationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_IncidentsForSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).IncidentsForSerial(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/IncidentsForSerial",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).IncidentsForSerial(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_KeyBlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(KeyBlockedRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).KeyBlocked(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/KeyBlocked",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).KeyBlocked(ctx, req.(*KeyBlockedRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_PreviousCertificateExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(PreviousCertificateExistsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityReadOnlyServer).PreviousCertificateExists(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthorityReadOnly/PreviousCertificateExists",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityReadOnlyServer).PreviousCertificateExists(ctx, req.(*PreviousCertificateExistsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthorityReadOnly_SerialsForIncident_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(SerialsForIncidentRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(StorageAuthorityReadOnlyServer).SerialsForIncident(m, &storageAuthorityReadOnlySerialsForIncidentServer{stream})
-}
-
-type StorageAuthorityReadOnly_SerialsForIncidentServer interface {
- Send(*IncidentSerial) error
- grpc.ServerStream
-}
-
-type storageAuthorityReadOnlySerialsForIncidentServer struct {
- grpc.ServerStream
-}
-
-func (x *storageAuthorityReadOnlySerialsForIncidentServer) Send(m *IncidentSerial) error {
- return x.ServerStream.SendMsg(m)
-}
-
-// StorageAuthorityReadOnly_ServiceDesc is the grpc.ServiceDesc for StorageAuthorityReadOnly service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var StorageAuthorityReadOnly_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "sa.StorageAuthorityReadOnly",
- HandlerType: (*StorageAuthorityReadOnlyServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "CountCertificatesByNames",
- Handler: _StorageAuthorityReadOnly_CountCertificatesByNames_Handler,
- },
- {
- MethodName: "CountFQDNSets",
- Handler: _StorageAuthorityReadOnly_CountFQDNSets_Handler,
- },
- {
- MethodName: "CountInvalidAuthorizations2",
- Handler: _StorageAuthorityReadOnly_CountInvalidAuthorizations2_Handler,
- },
- {
- MethodName: "CountOrders",
- Handler: _StorageAuthorityReadOnly_CountOrders_Handler,
- },
- {
- MethodName: "CountPendingAuthorizations2",
- Handler: _StorageAuthorityReadOnly_CountPendingAuthorizations2_Handler,
- },
- {
- MethodName: "CountRegistrationsByIP",
- Handler: _StorageAuthorityReadOnly_CountRegistrationsByIP_Handler,
- },
- {
- MethodName: "CountRegistrationsByIPRange",
- Handler: _StorageAuthorityReadOnly_CountRegistrationsByIPRange_Handler,
- },
- {
- MethodName: "FQDNSetExists",
- Handler: _StorageAuthorityReadOnly_FQDNSetExists_Handler,
- },
- {
- MethodName: "FQDNSetTimestampsForWindow",
- Handler: _StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_Handler,
- },
- {
- MethodName: "GetAuthorization2",
- Handler: _StorageAuthorityReadOnly_GetAuthorization2_Handler,
- },
- {
- MethodName: "GetAuthorizations2",
- Handler: _StorageAuthorityReadOnly_GetAuthorizations2_Handler,
- },
- {
- MethodName: "GetCertificate",
- Handler: _StorageAuthorityReadOnly_GetCertificate_Handler,
- },
- {
- MethodName: "GetCertificateStatus",
- Handler: _StorageAuthorityReadOnly_GetCertificateStatus_Handler,
- },
- {
- MethodName: "GetMaxExpiration",
- Handler: _StorageAuthorityReadOnly_GetMaxExpiration_Handler,
- },
- {
- MethodName: "GetOrder",
- Handler: _StorageAuthorityReadOnly_GetOrder_Handler,
- },
- {
- MethodName: "GetOrderForNames",
- Handler: _StorageAuthorityReadOnly_GetOrderForNames_Handler,
- },
- {
- MethodName: "GetPendingAuthorization2",
- Handler: _StorageAuthorityReadOnly_GetPendingAuthorization2_Handler,
- },
- {
- MethodName: "GetPrecertificate",
- Handler: _StorageAuthorityReadOnly_GetPrecertificate_Handler,
- },
- {
- MethodName: "GetRegistration",
- Handler: _StorageAuthorityReadOnly_GetRegistration_Handler,
- },
- {
- MethodName: "GetRegistrationByKey",
- Handler: _StorageAuthorityReadOnly_GetRegistrationByKey_Handler,
- },
- {
- MethodName: "GetRevocationStatus",
- Handler: _StorageAuthorityReadOnly_GetRevocationStatus_Handler,
- },
- {
- MethodName: "GetSerialMetadata",
- Handler: _StorageAuthorityReadOnly_GetSerialMetadata_Handler,
- },
- {
- MethodName: "GetValidAuthorizations2",
- Handler: _StorageAuthorityReadOnly_GetValidAuthorizations2_Handler,
- },
- {
- MethodName: "GetValidOrderAuthorizations2",
- Handler: _StorageAuthorityReadOnly_GetValidOrderAuthorizations2_Handler,
- },
- {
- MethodName: "IncidentsForSerial",
- Handler: _StorageAuthorityReadOnly_IncidentsForSerial_Handler,
- },
- {
- MethodName: "KeyBlocked",
- Handler: _StorageAuthorityReadOnly_KeyBlocked_Handler,
- },
- {
- MethodName: "PreviousCertificateExists",
- Handler: _StorageAuthorityReadOnly_PreviousCertificateExists_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "GetRevokedCerts",
- Handler: _StorageAuthorityReadOnly_GetRevokedCerts_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "SerialsForIncident",
- Handler: _StorageAuthorityReadOnly_SerialsForIncident_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "sa.proto",
-}
-
-// StorageAuthorityClient is the client API for StorageAuthority service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type StorageAuthorityClient interface {
- // Getters: this list must be identical to the StorageAuthorityReadOnly rpcs.
- CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error)
- CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error)
- CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error)
- CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error)
- CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
- CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
- CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
- FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error)
- FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error)
- GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error)
- GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
- GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
- GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error)
- GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error)
- GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
- GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
- GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
- GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
- GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
- GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error)
- GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error)
- GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthority_GetRevokedCertsClient, error)
- GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error)
- GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
- GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
- IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error)
- KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error)
- PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error)
- SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthority_SerialsForIncidentClient, error)
- // Adders
- AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error)
- AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error)
- DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error)
- FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error)
- NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
- NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error)
- NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error)
- RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error)
- UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-}
-
-type storageAuthorityClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewStorageAuthorityClient(cc grpc.ClientConnInterface) StorageAuthorityClient {
- return &storageAuthorityClient{cc}
-}
-
-func (c *storageAuthorityClient) CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) {
- out := new(CountByNames)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountCertificatesByNames", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountFQDNSets", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountInvalidAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountOrders", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountPendingAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIP", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) {
- out := new(Count)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIPRange", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) {
- out := new(Exists)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetExists", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) {
- out := new(Timestamps)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetTimestampsForWindow", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) {
- out := new(proto.Authorization)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorization2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
- out := new(Authorizations)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
- out := new(proto.Certificate)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) {
- out := new(proto.CertificateStatus)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificateStatus", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetMaxExpiration(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*timestamppb.Timestamp, error) {
- out := new(timestamppb.Timestamp)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetMaxExpiration", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
- out := new(proto.Order)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrder", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) {
- out := new(proto.Order)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrderForNames", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) {
- out := new(proto.Authorization)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPendingAuthorization2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) {
- out := new(proto.Certificate)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPrecertificate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) {
- out := new(proto.Registration)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistration", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) {
- out := new(proto.Registration)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistrationByKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) {
- out := new(RevocationStatus)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRevocationStatus", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthority_GetRevokedCertsClient, error) {
- stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[0], "/sa.StorageAuthority/GetRevokedCerts", opts...)
- if err != nil {
- return nil, err
- }
- x := &storageAuthorityGetRevokedCertsClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type StorageAuthority_GetRevokedCertsClient interface {
- Recv() (*proto.CRLEntry, error)
- grpc.ClientStream
-}
-
-type storageAuthorityGetRevokedCertsClient struct {
- grpc.ClientStream
-}
-
-func (x *storageAuthorityGetRevokedCertsClient) Recv() (*proto.CRLEntry, error) {
- m := new(proto.CRLEntry)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *storageAuthorityClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) {
- out := new(SerialMetadata)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetSerialMetadata", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
- out := new(Authorizations)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) {
- out := new(Authorizations)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidOrderAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) {
- out := new(Incidents)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/IncidentsForSerial", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) {
- out := new(Exists)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/KeyBlocked", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) {
- out := new(Exists)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/PreviousCertificateExists", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthority_SerialsForIncidentClient, error) {
- stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[1], "/sa.StorageAuthority/SerialsForIncident", opts...)
- if err != nil {
- return nil, err
- }
- x := &storageAuthoritySerialsForIncidentClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type StorageAuthority_SerialsForIncidentClient interface {
- Recv() (*IncidentSerial, error)
- grpc.ClientStream
-}
-
-type storageAuthoritySerialsForIncidentClient struct {
- grpc.ClientStream
-}
-
-func (x *storageAuthoritySerialsForIncidentClient) Recv() (*IncidentSerial, error) {
- m := new(IncidentSerial)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *storageAuthorityClient) AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddBlockedKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error) {
- out := new(AddCertificateResponse)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddCertificate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddPrecertificate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddSerial", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateAuthorization2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateRegistration", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeAuthorization2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeOrder", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error) {
- out := new(Authorization2IDs)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewAuthorizations2", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) {
- out := new(proto.Order)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrder", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error) {
- out := new(proto.Order)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrderAndAuthzs", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) {
- out := new(proto.Registration)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewRegistration", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/RevokeCertificate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderError", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderProcessing", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRegistration", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageAuthorityClient) UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRevokedCertificate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// StorageAuthorityServer is the server API for StorageAuthority service.
-// All implementations must embed UnimplementedStorageAuthorityServer
-// for forward compatibility
-type StorageAuthorityServer interface {
- // Getters: this list must be identical to the StorageAuthorityReadOnly rpcs.
- CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error)
- CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error)
- CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error)
- CountOrders(context.Context, *CountOrdersRequest) (*Count, error)
- CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error)
- CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error)
- CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error)
- FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error)
- FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error)
- GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error)
- GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error)
- GetCertificate(context.Context, *Serial) (*proto.Certificate, error)
- GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error)
- GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error)
- GetOrder(context.Context, *OrderRequest) (*proto.Order, error)
- GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error)
- GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error)
- GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error)
- GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error)
- GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error)
- GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error)
- GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthority_GetRevokedCertsServer) error
- GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error)
- GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error)
- GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error)
- IncidentsForSerial(context.Context, *Serial) (*Incidents, error)
- KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error)
- PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error)
- SerialsForIncident(*SerialsForIncidentRequest, StorageAuthority_SerialsForIncidentServer) error
- // Adders
- AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error)
- AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error)
- AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error)
- AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error)
- DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error)
- DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error)
- FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error)
- FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error)
- NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error)
- NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error)
- NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error)
- NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error)
- RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error)
- SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error)
- SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error)
- UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error)
- UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error)
- mustEmbedUnimplementedStorageAuthorityServer()
-}
-
-// UnimplementedStorageAuthorityServer must be embedded to have forward compatible implementations.
-type UnimplementedStorageAuthorityServer struct {
-}
-
-func (UnimplementedStorageAuthorityServer) CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountCertificatesByNames not implemented")
-}
-func (UnimplementedStorageAuthorityServer) CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountFQDNSets not implemented")
-}
-func (UnimplementedStorageAuthorityServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) CountOrders(context.Context, *CountOrdersRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountOrders not implemented")
-}
-func (UnimplementedStorageAuthorityServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIP not implemented")
-}
-func (UnimplementedStorageAuthorityServer) CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIPRange not implemented")
-}
-func (UnimplementedStorageAuthorityServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) {
- return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented")
-}
-func (UnimplementedStorageAuthorityServer) FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) {
- return nil, status.Errorf(codes.Unimplemented, "method FQDNSetTimestampsForWindow not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetCertificate(context.Context, *Serial) (*proto.Certificate, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetCertificate not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetCertificateStatus not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetMaxExpiration(context.Context, *emptypb.Empty) (*timestamppb.Timestamp, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetMaxExpiration not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetOrder not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetPendingAuthorization2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetPrecertificate not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetRevocationStatus not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthority_GetRevokedCertsServer) error {
- return status.Errorf(codes.Unimplemented, "method GetRevokedCerts not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetValidOrderAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) IncidentsForSerial(context.Context, *Serial) (*Incidents, error) {
- return nil, status.Errorf(codes.Unimplemented, "method IncidentsForSerial not implemented")
-}
-func (UnimplementedStorageAuthorityServer) KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) {
- return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented")
-}
-func (UnimplementedStorageAuthorityServer) PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) {
- return nil, status.Errorf(codes.Unimplemented, "method PreviousCertificateExists not implemented")
-}
-func (UnimplementedStorageAuthorityServer) SerialsForIncident(*SerialsForIncidentRequest, StorageAuthority_SerialsForIncidentServer) error {
- return status.Errorf(codes.Unimplemented, "method SerialsForIncident not implemented")
-}
-func (UnimplementedStorageAuthorityServer) AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AddBlockedKey not implemented")
-}
-func (UnimplementedStorageAuthorityServer) AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AddCertificate not implemented")
-}
-func (UnimplementedStorageAuthorityServer) AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AddPrecertificate not implemented")
-}
-func (UnimplementedStorageAuthorityServer) AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AddSerial not implemented")
-}
-func (UnimplementedStorageAuthorityServer) DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeactivateAuthorization2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeactivateRegistration not implemented")
-}
-func (UnimplementedStorageAuthorityServer) FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method FinalizeAuthorization2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method FinalizeOrder not implemented")
-}
-func (UnimplementedStorageAuthorityServer) NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error) {
- return nil, status.Errorf(codes.Unimplemented, "method NewAuthorizations2 not implemented")
-}
-func (UnimplementedStorageAuthorityServer) NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) {
- return nil, status.Errorf(codes.Unimplemented, "method NewOrder not implemented")
-}
-func (UnimplementedStorageAuthorityServer) NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error) {
- return nil, status.Errorf(codes.Unimplemented, "method NewOrderAndAuthzs not implemented")
-}
-func (UnimplementedStorageAuthorityServer) NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) {
- return nil, status.Errorf(codes.Unimplemented, "method NewRegistration not implemented")
-}
-func (UnimplementedStorageAuthorityServer) RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented")
-}
-func (UnimplementedStorageAuthorityServer) SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SetOrderError not implemented")
-}
-func (UnimplementedStorageAuthorityServer) SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SetOrderProcessing not implemented")
-}
-func (UnimplementedStorageAuthorityServer) UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateRegistration not implemented")
-}
-func (UnimplementedStorageAuthorityServer) UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateRevokedCertificate not implemented")
-}
-func (UnimplementedStorageAuthorityServer) mustEmbedUnimplementedStorageAuthorityServer() {}
-
-// UnsafeStorageAuthorityServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to StorageAuthorityServer will
-// result in compilation errors.
-type UnsafeStorageAuthorityServer interface {
- mustEmbedUnimplementedStorageAuthorityServer()
-}
-
-func RegisterStorageAuthorityServer(s grpc.ServiceRegistrar, srv StorageAuthorityServer) {
- s.RegisterService(&StorageAuthority_ServiceDesc, srv)
-}
-
-func _StorageAuthority_CountCertificatesByNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountCertificatesByNamesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/CountCertificatesByNames",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, req.(*CountCertificatesByNamesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_CountFQDNSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountFQDNSetsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).CountFQDNSets(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/CountFQDNSets",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).CountFQDNSets(ctx, req.(*CountFQDNSetsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_CountInvalidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountInvalidAuthorizationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/CountInvalidAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, req.(*CountInvalidAuthorizationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_CountOrders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountOrdersRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).CountOrders(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/CountOrders",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).CountOrders(ctx, req.(*CountOrdersRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_CountPendingAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RegistrationID)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/CountPendingAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, req.(*RegistrationID))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_CountRegistrationsByIP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountRegistrationsByIPRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/CountRegistrationsByIP",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, req.(*CountRegistrationsByIPRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_CountRegistrationsByIPRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountRegistrationsByIPRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/CountRegistrationsByIPRange",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, req.(*CountRegistrationsByIPRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(FQDNSetExistsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).FQDNSetExists(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/FQDNSetExists",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).FQDNSetExists(ctx, req.(*FQDNSetExistsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_FQDNSetTimestampsForWindow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountFQDNSetsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/FQDNSetTimestampsForWindow",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, req.(*CountFQDNSetsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthorizationID2)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetAuthorization2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetAuthorization2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetAuthorization2(ctx, req.(*AuthorizationID2))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetAuthorizationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, req.(*GetAuthorizationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetCertificate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetCertificate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetCertificate(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetCertificateStatus(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetCertificateStatus",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetCertificateStatus(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetMaxExpiration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(emptypb.Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetMaxExpiration(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetMaxExpiration",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetMaxExpiration(ctx, req.(*emptypb.Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(OrderRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetOrder(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetOrder",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetOrder(ctx, req.(*OrderRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetOrderForNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetOrderForNamesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetOrderForNames(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetOrderForNames",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetOrderForNames(ctx, req.(*GetOrderForNamesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetPendingAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetPendingAuthorizationRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetPendingAuthorization2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, req.(*GetPendingAuthorizationRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetPrecertificate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetPrecertificate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetPrecertificate(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RegistrationID)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetRegistration(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetRegistration",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetRegistration(ctx, req.(*RegistrationID))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetRegistrationByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(JSONWebKey)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetRegistrationByKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, req.(*JSONWebKey))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetRevocationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetRevocationStatus(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetRevocationStatus",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetRevocationStatus(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetRevokedCerts_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(GetRevokedCertsRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(StorageAuthorityServer).GetRevokedCerts(m, &storageAuthorityGetRevokedCertsServer{stream})
-}
-
-type StorageAuthority_GetRevokedCertsServer interface {
- Send(*proto.CRLEntry) error
- grpc.ServerStream
-}
-
-type storageAuthorityGetRevokedCertsServer struct {
- grpc.ServerStream
-}
-
-func (x *storageAuthorityGetRevokedCertsServer) Send(m *proto.CRLEntry) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _StorageAuthority_GetSerialMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetSerialMetadata",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetValidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetValidAuthorizationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetValidAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, req.(*GetValidAuthorizationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_GetValidOrderAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetValidOrderAuthorizationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/GetValidOrderAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, req.(*GetValidOrderAuthorizationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_IncidentsForSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Serial)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).IncidentsForSerial(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/IncidentsForSerial",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).IncidentsForSerial(ctx, req.(*Serial))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_KeyBlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(KeyBlockedRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).KeyBlocked(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/KeyBlocked",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).KeyBlocked(ctx, req.(*KeyBlockedRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_PreviousCertificateExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(PreviousCertificateExistsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/PreviousCertificateExists",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, req.(*PreviousCertificateExistsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_SerialsForIncident_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(SerialsForIncidentRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(StorageAuthorityServer).SerialsForIncident(m, &storageAuthoritySerialsForIncidentServer{stream})
-}
-
-type StorageAuthority_SerialsForIncidentServer interface {
- Send(*IncidentSerial) error
- grpc.ServerStream
-}
-
-type storageAuthoritySerialsForIncidentServer struct {
- grpc.ServerStream
-}
-
-func (x *storageAuthoritySerialsForIncidentServer) Send(m *IncidentSerial) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _StorageAuthority_AddBlockedKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AddBlockedKeyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).AddBlockedKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/AddBlockedKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).AddBlockedKey(ctx, req.(*AddBlockedKeyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_AddCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AddCertificateRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).AddCertificate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/AddCertificate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).AddCertificate(ctx, req.(*AddCertificateRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_AddPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AddCertificateRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).AddPrecertificate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/AddPrecertificate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).AddPrecertificate(ctx, req.(*AddCertificateRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_AddSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AddSerialRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).AddSerial(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/AddSerial",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).AddSerial(ctx, req.(*AddSerialRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_DeactivateAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthorizationID2)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/DeactivateAuthorization2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, req.(*AuthorizationID2))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_DeactivateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RegistrationID)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/DeactivateRegistration",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, req.(*RegistrationID))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_FinalizeAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(FinalizeAuthorizationRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/FinalizeAuthorization2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, req.(*FinalizeAuthorizationRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(FinalizeOrderRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).FinalizeOrder(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/FinalizeOrder",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).FinalizeOrder(ctx, req.(*FinalizeOrderRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_NewAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AddPendingAuthorizationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/NewAuthorizations2",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, req.(*AddPendingAuthorizationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_NewOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(NewOrderRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).NewOrder(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/NewOrder",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).NewOrder(ctx, req.(*NewOrderRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_NewOrderAndAuthzs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(NewOrderAndAuthzsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/NewOrderAndAuthzs",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, req.(*NewOrderAndAuthzsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_NewRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(proto.Registration)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).NewRegistration(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/NewRegistration",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).NewRegistration(ctx, req.(*proto.Registration))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RevokeCertificateRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).RevokeCertificate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/RevokeCertificate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).RevokeCertificate(ctx, req.(*RevokeCertificateRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_SetOrderError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SetOrderErrorRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).SetOrderError(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/SetOrderError",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).SetOrderError(ctx, req.(*SetOrderErrorRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_SetOrderProcessing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(OrderRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/SetOrderProcessing",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, req.(*OrderRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_UpdateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(proto.Registration)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).UpdateRegistration(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/UpdateRegistration",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).UpdateRegistration(ctx, req.(*proto.Registration))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _StorageAuthority_UpdateRevokedCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RevokeCertificateRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/sa.StorageAuthority/UpdateRevokedCertificate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, req.(*RevokeCertificateRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// StorageAuthority_ServiceDesc is the grpc.ServiceDesc for StorageAuthority service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var StorageAuthority_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "sa.StorageAuthority",
- HandlerType: (*StorageAuthorityServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "CountCertificatesByNames",
- Handler: _StorageAuthority_CountCertificatesByNames_Handler,
- },
- {
- MethodName: "CountFQDNSets",
- Handler: _StorageAuthority_CountFQDNSets_Handler,
- },
- {
- MethodName: "CountInvalidAuthorizations2",
- Handler: _StorageAuthority_CountInvalidAuthorizations2_Handler,
- },
- {
- MethodName: "CountOrders",
- Handler: _StorageAuthority_CountOrders_Handler,
- },
- {
- MethodName: "CountPendingAuthorizations2",
- Handler: _StorageAuthority_CountPendingAuthorizations2_Handler,
- },
- {
- MethodName: "CountRegistrationsByIP",
- Handler: _StorageAuthority_CountRegistrationsByIP_Handler,
- },
- {
- MethodName: "CountRegistrationsByIPRange",
- Handler: _StorageAuthority_CountRegistrationsByIPRange_Handler,
- },
- {
- MethodName: "FQDNSetExists",
- Handler: _StorageAuthority_FQDNSetExists_Handler,
- },
- {
- MethodName: "FQDNSetTimestampsForWindow",
- Handler: _StorageAuthority_FQDNSetTimestampsForWindow_Handler,
- },
- {
- MethodName: "GetAuthorization2",
- Handler: _StorageAuthority_GetAuthorization2_Handler,
- },
- {
- MethodName: "GetAuthorizations2",
- Handler: _StorageAuthority_GetAuthorizations2_Handler,
- },
- {
- MethodName: "GetCertificate",
- Handler: _StorageAuthority_GetCertificate_Handler,
- },
- {
- MethodName: "GetCertificateStatus",
- Handler: _StorageAuthority_GetCertificateStatus_Handler,
- },
- {
- MethodName: "GetMaxExpiration",
- Handler: _StorageAuthority_GetMaxExpiration_Handler,
- },
- {
- MethodName: "GetOrder",
- Handler: _StorageAuthority_GetOrder_Handler,
- },
- {
- MethodName: "GetOrderForNames",
- Handler: _StorageAuthority_GetOrderForNames_Handler,
- },
- {
- MethodName: "GetPendingAuthorization2",
- Handler: _StorageAuthority_GetPendingAuthorization2_Handler,
- },
- {
- MethodName: "GetPrecertificate",
- Handler: _StorageAuthority_GetPrecertificate_Handler,
- },
- {
- MethodName: "GetRegistration",
- Handler: _StorageAuthority_GetRegistration_Handler,
- },
- {
- MethodName: "GetRegistrationByKey",
- Handler: _StorageAuthority_GetRegistrationByKey_Handler,
- },
- {
- MethodName: "GetRevocationStatus",
- Handler: _StorageAuthority_GetRevocationStatus_Handler,
- },
- {
- MethodName: "GetSerialMetadata",
- Handler: _StorageAuthority_GetSerialMetadata_Handler,
- },
- {
- MethodName: "GetValidAuthorizations2",
- Handler: _StorageAuthority_GetValidAuthorizations2_Handler,
- },
- {
- MethodName: "GetValidOrderAuthorizations2",
- Handler: _StorageAuthority_GetValidOrderAuthorizations2_Handler,
- },
- {
- MethodName: "IncidentsForSerial",
- Handler: _StorageAuthority_IncidentsForSerial_Handler,
- },
- {
- MethodName: "KeyBlocked",
- Handler: _StorageAuthority_KeyBlocked_Handler,
- },
- {
- MethodName: "PreviousCertificateExists",
- Handler: _StorageAuthority_PreviousCertificateExists_Handler,
- },
- {
- MethodName: "AddBlockedKey",
- Handler: _StorageAuthority_AddBlockedKey_Handler,
- },
- {
- MethodName: "AddCertificate",
- Handler: _StorageAuthority_AddCertificate_Handler,
- },
- {
- MethodName: "AddPrecertificate",
- Handler: _StorageAuthority_AddPrecertificate_Handler,
- },
- {
- MethodName: "AddSerial",
- Handler: _StorageAuthority_AddSerial_Handler,
- },
- {
- MethodName: "DeactivateAuthorization2",
- Handler: _StorageAuthority_DeactivateAuthorization2_Handler,
- },
- {
- MethodName: "DeactivateRegistration",
- Handler: _StorageAuthority_DeactivateRegistration_Handler,
- },
- {
- MethodName: "FinalizeAuthorization2",
- Handler: _StorageAuthority_FinalizeAuthorization2_Handler,
- },
- {
- MethodName: "FinalizeOrder",
- Handler: _StorageAuthority_FinalizeOrder_Handler,
- },
- {
- MethodName: "NewAuthorizations2",
- Handler: _StorageAuthority_NewAuthorizations2_Handler,
- },
- {
- MethodName: "NewOrder",
- Handler: _StorageAuthority_NewOrder_Handler,
- },
- {
- MethodName: "NewOrderAndAuthzs",
- Handler: _StorageAuthority_NewOrderAndAuthzs_Handler,
- },
- {
- MethodName: "NewRegistration",
- Handler: _StorageAuthority_NewRegistration_Handler,
- },
- {
- MethodName: "RevokeCertificate",
- Handler: _StorageAuthority_RevokeCertificate_Handler,
- },
- {
- MethodName: "SetOrderError",
- Handler: _StorageAuthority_SetOrderError_Handler,
- },
- {
- MethodName: "SetOrderProcessing",
- Handler: _StorageAuthority_SetOrderProcessing_Handler,
- },
- {
- MethodName: "UpdateRegistration",
- Handler: _StorageAuthority_UpdateRegistration_Handler,
- },
- {
- MethodName: "UpdateRevokedCertificate",
- Handler: _StorageAuthority_UpdateRevokedCertificate_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "GetRevokedCerts",
- Handler: _StorageAuthority_GetRevokedCerts_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "SerialsForIncident",
- Handler: _StorageAuthority_SerialsForIncident_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "sa.proto",
-}
diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go b/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go
deleted file mode 100644
index 5e2221f3f24..00000000000
--- a/vendor/github.com/letsencrypt/boulder/sa/proto/subsets.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copied from the auto-generated sa_grpc.pb.go
-
-package proto
-
-import (
- context "context"
-
- proto "github.com/letsencrypt/boulder/core/proto"
- grpc "google.golang.org/grpc"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
-)
-
-// StorageAuthorityGetterClient is a read-only subset of the sapb.StorageAuthorityClient interface
-type StorageAuthorityGetterClient interface {
- GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
- GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error)
- GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
- GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
- GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error)
- CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error)
- CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
- CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
- CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error)
- CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error)
- FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error)
- PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error)
- GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error)
- GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
- GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
- CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
- GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
- CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error)
- GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
- KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error)
- GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
- GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
- IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error)
-}
-
-// StorageAuthorityCertificateClient is a subset of the sapb.StorageAuthorityClient interface that only reads and writes certificates
-type StorageAuthorityCertificateClient interface {
- AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
- AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error)
- GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
-}
diff --git a/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go
new file mode 100644
index 00000000000..8e3bae9965a
--- /dev/null
+++ b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go
@@ -0,0 +1,46 @@
+// Package strictyaml provides a strict YAML unmarshaller based on `go-yaml/yaml`
+package strictyaml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "gopkg.in/yaml.v3"
+)
+
+// Unmarshal takes a byte array and an interface passed by reference. The
+// d.Decode will read the next YAML-encoded value from its input and store it in
+// the value pointed to by yamlObj. Any config keys from the incoming YAML
+// document which do not correspond to expected keys in the config struct will
+// result in errors.
+//
+// TODO(https://github.com/go-yaml/yaml/issues/639): Replace this function with
+// yaml.Unmarshal once a more ergonomic way to set unmarshal options is added
+// upstream.
+func Unmarshal(b []byte, yamlObj interface{}) error {
+ r := bytes.NewReader(b)
+
+ d := yaml.NewDecoder(r)
+ d.KnownFields(true)
+
+ // d.Decode will mutate yamlObj
+ err := d.Decode(yamlObj)
+
+ if err != nil {
+ // io.EOF is returned when the YAML document is empty.
+ if errors.Is(err, io.EOF) {
+ return fmt.Errorf("unmarshalling YAML, bytes cannot be nil: %w", err)
+ }
+ return fmt.Errorf("unmarshalling YAML: %w", err)
+ }
+
+ // As bytes are read by the decoder, the length of the byte buffer should
+ // decrease. If it doesn't, there's a problem.
+ if r.Len() != 0 {
+ return fmt.Errorf("yaml object of size %d bytes had %d bytes of unexpected unconsumed trailers", r.Size(), r.Len())
+ }
+
+ return nil
+}
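The strictyaml.Unmarshal helper added above enables yaml.v3's KnownFields(true) mode, so any key in the document that has no matching field in the target struct becomes an error instead of being silently dropped. A minimal usage sketch, assuming a hypothetical Config struct and inline YAML documents (neither is part of the vendored code):

package main

import (
	"fmt"
	"log"

	"github.com/letsencrypt/boulder/strictyaml"
)

// Config is a hypothetical destination struct; any YAML key that does not map
// to one of these fields causes strictyaml.Unmarshal to return an error.
type Config struct {
	Hostname string `yaml:"hostname"`
	Port     int    `yaml:"port"`
}

func main() {
	var c Config

	// "portt" is misspelled on purpose: a plain yaml.Unmarshal would ignore
	// the unknown key, while the strict decoder rejects the document.
	bad := []byte("hostname: example.com\nportt: 8080\n")
	if err := strictyaml.Unmarshal(bad, &c); err != nil {
		log.Printf("rejected as expected: %v", err)
	}

	good := []byte("hostname: example.com\nport: 8080\n")
	if err := strictyaml.Unmarshal(good, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Hostname, c.Port)
}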
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index a912b75a05b..62de4dc59aa 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -59,6 +59,18 @@ type ExemplarAdder interface {
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+ CounterOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
@@ -174,16 +186,24 @@ type CounterVec struct {
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
- desc := NewDesc(
+ return V2.NewCounterVec(CounterVecOpts{
+ CounterOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection.
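The counter.go hunk above reroutes the classic NewCounterVec constructor through the experimental V2.NewCounterVec entry point, wrapping plain label names in UnconstrainedLabels. A small sketch of the two equivalent call styles, using only the API visible in this diff; the metric names are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// Classic constructor: label names only.
	classic := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Requests served, by method.",
		},
		[]string{"method"},
	)

	// V2 constructor: the same vector expressed through CounterVecOpts, which
	// can also carry optional per-label Constraint functions (none used here).
	v2 := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "http_requests_v2_total",
			Help: "Requests served, by method (V2 constructor).",
		},
		VariableLabels: prometheus.UnconstrainedLabels{"method"},
	})

	reg.MustRegister(classic, v2)
	classic.WithLabelValues("GET").Inc()
	v2.WithLabelValues("GET").Inc()
	fmt.Println("registered both vectors")
}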
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 8bc5e44e2fc..12331542dde 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -14,20 +14,16 @@
package prometheus
import (
- "errors"
"fmt"
"sort"
"strings"
- "github.com/cespare/xxhash/v2"
-
"github.com/prometheus/client_golang/prometheus/internal"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-
+ "github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
@@ -54,9 +50,9 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
- // variableLabels contains names of labels for which the metric
- // maintains variable values.
- variableLabels []string
+ // variableLabels contains names of labels and normalization function for
+ // which the metric maintains variable values.
+ variableLabels ConstrainedLabels
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
@@ -80,10 +76,24 @@ type Desc struct {
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
- variableLabels: variableLabels,
+ variableLabels: variableLabels.constrainedLabels(),
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
- labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
@@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
- for _, labelName := range variableLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+ for _, label := range d.variableLabels {
+ if !checkLabelName(label.Name) {
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName)
return d
}
- labelNames = append(labelNames, "$"+labelName)
- labelNameSet[labelName] = struct{}{}
+ labelNames = append(labelNames, "$"+label.Name)
+ labelNameSet[label.Name] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
- d.err = errors.New("duplicate label names")
+ d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
return d
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 811072cbd54..962608f02c6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -37,35 +37,35 @@
//
// type metrics struct {
// cpuTemp prometheus.Gauge
-// hdFailures *prometheus.CounterVec
+// hdFailures *prometheus.CounterVec
// }
//
// func NewMetrics(reg prometheus.Registerer) *metrics {
-// m := &metrics{
-// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// }),
-// hdFailures: prometheus.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// },
-// []string{"device"},
-// ),
-// }
-// reg.MustRegister(m.cpuTemp)
-// reg.MustRegister(m.hdFailures)
-// return m
+// m := &metrics{
+// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// }),
+// hdFailures: prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// ),
+// }
+// reg.MustRegister(m.cpuTemp)
+// reg.MustRegister(m.hdFailures)
+// return m
// }
//
// func main() {
-// // Create a non-global registry.
-// reg := prometheus.NewRegistry()
+// // Create a non-global registry.
+// reg := prometheus.NewRegistry()
//
-// // Create new metrics and register them using the custom registry.
-// m := NewMetrics(reg)
-// // Set values for the new created metrics.
+// // Create new metrics and register them using the custom registry.
+// m := NewMetrics(reg)
+// // Set values for the new created metrics.
// m.cpuTemp.Set(65.3)
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index 21271a5bb46..f1ea6c76f75 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -55,6 +55,18 @@ type Gauge interface {
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+ GaugeOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
@@ -138,16 +150,24 @@ type GaugeVec struct {
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- desc := NewDesc(
+ return V2.NewGaugeVec(GaugeVecOpts{
+ GaugeOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
index 3a2d55e84b1..2d8d9f64f43 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -23,11 +23,10 @@ import (
"strings"
"sync"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- dto "github.com/prometheus/client_model/go"
-
"github.com/prometheus/client_golang/prometheus/internal"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
)
const (
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 4c873a01c3d..5b69965b25b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -22,10 +22,9 @@ import (
"sync/atomic"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "google.golang.org/protobuf/proto"
)
// nativeHistogramBounds for the frac of observed values. Only relevant for
@@ -469,6 +468,18 @@ type HistogramOpts struct {
NativeHistogramMaxZeroThreshold float64
}
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+ HistogramOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
//
@@ -489,11 +500,11 @@ func NewHistogram(opts HistogramOpts) Histogram {
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
}
for _, n := range desc.variableLabels {
- if n == bucketLabel {
+ if n.Name == bucketLabel {
panic(errBucketLabelNotAllowed)
}
}
@@ -544,16 +555,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts as well as exemplars:
- h.counts[0] = &histogramCounts{
- buckets: make([]uint64, len(h.upperBounds)),
- nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
- nativeHistogramSchema: h.nativeHistogramSchema,
- }
- h.counts[1] = &histogramCounts{
- buckets: make([]uint64, len(h.upperBounds)),
- nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
- nativeHistogramSchema: h.nativeHistogramSchema,
- }
+ h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+ h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
@@ -1034,15 +1041,23 @@ type HistogramVec struct {
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
- desc := NewDesc(
+ return V2.NewHistogramVec(HistogramVecOpts{
+ HistogramOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newHistogram(desc, opts, lvs...)
+ return newHistogram(desc, opts.HistogramOpts, lvs...)
}),
}
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index c1b8fad36ae..63ff8683ce5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -32,6 +32,78 @@ import (
// create a Desc.
type Labels map[string]string
+// ConstrainedLabel represents a label name and its constraint function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+ Name string
+ Constraint func(string) string
+}
+
+func (cl ConstrainedLabel) Constrain(v string) string {
+ if cl.Constraint == nil {
+ return v
+ }
+ return cl.Constraint(v)
+}
+
+// ConstrainableLabels is an interface that allows creating of labels that can
+// be optionally constrained.
+//
+// prometheus.V2().NewCounterVec(CounterVecOpts{
+// CounterOpts: {...}, // Usual CounterOpts fields
+// VariableLabels: []ConstrainedLabels{
+// {Name: "A"},
+// {Name: "B", Constraint: func(v string) string { ... }},
+// },
+// })
+type ConstrainableLabels interface {
+ constrainedLabels() ConstrainedLabels
+ labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constraint function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels {
+ return cls
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+ names := make([]string, len(cls))
+ for i, label := range cls {
+ names[i] = label.Name
+ }
+ return names
+}
+
+// UnconstrainedLabels represents a collection of labels without any constraint on
+// their values. Thus, it is simply a collection of label names.
+//
+// UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+// ConstrainedLabels {
+// { Name: "A" },
+// { Name: "B" },
+// }
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels {
+ constrainedLabels := make([]ConstrainedLabel, len(uls))
+ for i, l := range uls {
+ constrainedLabels[i] = ConstrainedLabel{Name: l}
+ }
+ return constrainedLabels
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+ return uls
+}
+
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
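The labels.go additions above are what enable per-label value normalization: a ConstrainedLabel pairs a name with an optional Constraint function that, per the doc comments in this diff, is applied to every value recorded for that label. A hedged sketch using the GaugeVecOpts constructor from the earlier gauge.go hunk; the metric, the "region" label, and the lowercasing rule are all illustrative:

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	up := prometheus.V2.NewGaugeVec(prometheus.GaugeVecOpts{
		GaugeOpts: prometheus.GaugeOpts{
			Name: "backend_up",
			Help: "Whether a backend is reachable, by region.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			// Every value recorded for "region" is passed through the
			// Constraint first, so "EU-WEST" and "eu-west" collapse into
			// the same time series.
			{Name: "region", Constraint: strings.ToLower},
		},
	})
	prometheus.MustRegister(up)

	up.WithLabelValues("EU-WEST").Set(1) // recorded with region="eu-west"
}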
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index b5119c50410..07bbc9d7687 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -20,11 +20,9 @@ import (
"strings"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
)
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
index 21086781621..d3482c40ca7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
o.apply(rtOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
- addWithExemplar(
- counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
- 1,
- rtOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
}
return resp, err
}
@@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
o.apply(rtOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
- time.Since(start).Seconds(),
- rtOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
}
return resp, err
}
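To show what the extraLabelsFromCtx plumbing above enables on the client side, here is a hedged sketch. The metric name, the "tenant" label, and the context key are illustrative assumptions; InstrumentRoundTripperCounter and WithLabelFromCtx are the APIs in this vendored version.

	package main

	import (
		"context"
		"net/http"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	type ctxKey string

	const tenantKey ctxKey = "tenant"

	func main() {
		// The counter declares the dynamic "tenant" label next to code/method.
		apiRequests := prometheus.NewCounterVec(prometheus.CounterOpts{
			Name: "client_api_requests_total",
			Help: "Outgoing API requests by status code, method and tenant.",
		}, []string{"code", "method", "tenant"})
		prometheus.MustRegister(apiRequests)

		client := &http.Client{
			Transport: promhttp.InstrumentRoundTripperCounter(
				apiRequests,
				http.DefaultTransport,
				// Resolved per request from the context; falls back to "unknown".
				promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
					if v, ok := ctx.Value(tenantKey).(string); ok {
						return v
					}
					return "unknown"
				}),
			),
		}

		req, _ := http.NewRequestWithContext(
			context.WithValue(context.Background(), tenantKey, "acme"),
			http.MethodGet, "https://example.com", nil)
		_, _ = client.Do(req)
	}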
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index cca67a78a90..3793036ad09 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
return func(w http.ResponseWriter, r *http.Request) {
@@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- time.Since(now).Seconds(),
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
-
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
- time.Since(now).Seconds(),
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
}
}
@@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
o.apply(hOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- addWithExemplar(
- counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- 1,
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
- addWithExemplar(
- counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
- 1,
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
}
}
@@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
- time.Since(now).Seconds(),
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, status, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
})
next.ServeHTTP(d, r)
}
@@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
+
if code {
return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- float64(size),
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
- float64(size),
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
}
}
@@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- float64(d.Written()),
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
})
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
index c590d912c94..5d4383aa14a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
@@ -24,14 +24,32 @@ type Option interface {
apply(*options)
}
+// LabelValueFromCtx is used to compute a label value from the request context.
+// The context can be populated with values from the request by middleware.
+type LabelValueFromCtx func(ctx context.Context) string
+
// options store options for both a handler or round tripper.
type options struct {
- extraMethods []string
- getExemplarFn func(requestCtx context.Context) prometheus.Labels
+ extraMethods []string
+ getExemplarFn func(requestCtx context.Context) prometheus.Labels
+ extraLabelsFromCtx map[string]LabelValueFromCtx
}
func defaultOptions() *options {
- return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }}
+ return &options{
+ getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
+ extraLabelsFromCtx: map[string]LabelValueFromCtx{},
+ }
+}
+
+func (o *options) emptyDynamicLabels() prometheus.Labels {
+ labels := prometheus.Labels{}
+
+ for label := range o.extraLabelsFromCtx {
+ labels[label] = ""
+ }
+
+ return labels
}
type optionApplyFunc func(*options)
@@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option {
})
}
-// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics.
-// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric
-// will get instrumented without exemplar.
+// WithExemplarFromContext allows injecting a function that extracts an exemplar from the request context and
+// attaches it to counter and histogram metrics. If the function returns nil labels or the metric does not support
+// exemplars, no exemplar is added (noop), but the metric still observes/increments.
func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
return optionApplyFunc(func(o *options) {
o.getExemplarFn = getExemplarFn
})
}
+
+// WithLabelFromCtx registers a label whose value is resolved dynamically from the request context.
+// See ExampleInstrumentHandlerWithLabelResolver for example usage.
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
+ return optionApplyFunc(func(o *options) {
+ o.extraLabelsFromCtx[name] = valueFn
+ })
+}
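A short server-side sketch (not part of the vendored diff) tying the two options together. The histogram name, the "handler" label, and the context keys are illustrative assumptions; InstrumentHandlerDuration, WithLabelFromCtx, and WithExemplarFromContext are the APIs shown in this diff.

	package main

	import (
		"context"
		"net/http"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	type ctxKey string

	const (
		handlerNameKey ctxKey = "handlerName"
		traceIDKey     ctxKey = "traceID"
	)

	func main() {
		duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "Request latency by status code, method and handler.",
			Buckets: prometheus.DefBuckets,
		}, []string{"code", "method", "handler"})
		prometheus.MustRegister(duration)

		hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello"))
		})

		http.Handle("/hello", promhttp.InstrumentHandlerDuration(
			duration,
			hello,
			// "handler" is filled in per request; middleware is expected to
			// have stored the value in the context beforehand.
			promhttp.WithLabelFromCtx("handler", func(ctx context.Context) string {
				if v, ok := ctx.Value(handlerNameKey).(string); ok {
					return v
				}
				return "unknown"
			}),
			// Attach a trace ID as an exemplar when the context carries one;
			// returning nil keeps the observation exemplar-free.
			promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
				if id, ok := ctx.Value(traceIDKey).(string); ok {
					return prometheus.Labels{"trace_id": id}
				}
				return nil
			}),
		))
	}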
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 09e34d307c9..44da9433bee 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -21,18 +21,17 @@ import (
"path/filepath"
"runtime"
"sort"
+ "strconv"
"strings"
"sync"
"unicode/utf8"
- "github.com/cespare/xxhash/v2"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/client_golang/prometheus/internal"
+ "github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus/internal"
+ "github.com/prometheus/common/expfmt"
+ "google.golang.org/protobuf/proto"
)
const (
@@ -933,6 +932,10 @@ func checkMetricConsistency(
h.WriteString(lp.GetValue())
h.Write(separatorByteSlice)
}
+ if dtoMetric.TimestampMs != nil {
+ h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+ h.Write(separatorByteSlice)
+ }
hSum := h.Sum64()
if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf(
@@ -962,7 +965,7 @@ func checkDescConsistency(
copy(lpsFromDesc, desc.constLabelPairs)
for _, l := range desc.variableLabels {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
- Name: proto.String(l),
+ Name: proto.String(l.Name),
})
}
if len(lpsFromDesc) != len(dtoMetric.Label) {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 7bc448a8939..dd359264e59 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -22,11 +22,10 @@ import (
"sync/atomic"
"time"
- "github.com/beorn7/perks/quantile"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "github.com/beorn7/perks/quantile"
+ "google.golang.org/protobuf/proto"
)
// quantileLabel is used for the label that defines the quantile in a
@@ -148,6 +147,18 @@ type SummaryOpts struct {
BufCap uint32
}
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left at its default value.
+type SummaryVecOpts struct {
+ SummaryOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// Problem with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
@@ -178,11 +189,11 @@ func NewSummary(opts SummaryOpts) Summary {
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
}
for _, n := range desc.variableLabels {
- if n == quantileLabel {
+ if n.Name == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
@@ -530,20 +541,28 @@ type SummaryVec struct {
// it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
- for _, ln := range labelNames {
+ return V2.NewSummaryVec(SummaryVecOpts{
+ SummaryOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+ for _, ln := range opts.VariableLabels.labelNames() {
if ln == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
- desc := NewDesc(
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &SummaryVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newSummary(desc, opts, lvs...)
+ return newSummary(desc, opts.SummaryOpts, lvs...)
}),
}
}
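A hedged sketch (not part of the vendored diff) of the new V2.NewSummaryVec entry point. The metric name, objectives, and label names are illustrative, and the lower-casing constraint is just one possible normalization.

	package main

	import (
		"strings"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		rpcDurations := prometheus.V2.NewSummaryVec(prometheus.SummaryVecOpts{
			SummaryOpts: prometheus.SummaryOpts{
				Name:       "rpc_durations_seconds",
				Help:       "RPC latency distributions.",
				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
			},
			// "service" values are normalized to lower case; "method" is left as-is.
			VariableLabels: prometheus.ConstrainedLabels{
				{Name: "service", Constraint: strings.ToLower},
				{Name: "method"},
			},
		})
		prometheus.MustRegister(rpcDurations)

		// Both observations land in the same child because of the constraint.
		rpcDurations.WithLabelValues("MyService", "Get").Observe(0.012)
		rpcDurations.WithLabelValues("myservice", "Get").Observe(0.042)
	}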
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index f28a76f3a62..52344fef53f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -23,7 +23,9 @@ type Timer struct {
}
// NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. Timer is usually used to time a function call in the
+// duration in seconds. If the Observer implements ExemplarObserver, passing an exemplar
+// later on will also be supported.
+// Timer is usually used to time a function call in the
// following way:
//
// func TimeMe() {
@@ -31,6 +33,14 @@ type Timer struct {
// defer timer.ObserveDuration()
// // Do actual work.
// }
+//
+// or
+//
+// func TimeMeWithExemplar() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDurationWithExemplar(exemplar)
+// // Do actual work.
+// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
@@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration {
}
return d
}
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe the exemplar with the duration unless the exemplar is nil or the provided Observer cannot
+// be cast to ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+ d := time.Since(t.begin)
+ eo, ok := t.observer.(ExemplarObserver)
+ if ok && exemplar != nil {
+ eo.ObserveWithExemplar(d.Seconds(), exemplar)
+ return d
+ }
+ if t.observer != nil {
+ t.observer.Observe(d.Seconds())
+ }
+ return d
+}
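A small sketch (not part of the vendored diff) of the exemplar-aware timer added above. The histogram and the hard-coded trace ID are illustrative; on Observers without exemplar support the call degrades to a plain Observe.

	package main

	import (
		"time"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		workDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
			Name:    "work_duration_seconds",
			Help:    "Time spent doing the work.",
			Buckets: prometheus.DefBuckets,
		})
		prometheus.MustRegister(workDuration)

		timer := prometheus.NewTimer(workDuration)
		// Illustrative trace ID; real code would take it from tracing middleware.
		defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": "abc123"})

		time.Sleep(10 * time.Millisecond) // stand-in for the actual work
	}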
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index 2d3abc1cbd6..5f6bb80014d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -19,13 +19,11 @@ import (
"time"
"unicode/utf8"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/types/known/timestamppb"
-
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// ValueType is an enumeration of metric types that represent a simple value.
@@ -188,9 +186,9 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
return desc.constLabelPairs
}
labelPairs := make([]*dto.LabelPair, 0, totalLen)
- for i, n := range desc.variableLabels {
+ for i, l := range desc.variableLabels {
labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(n),
+ Name: proto.String(l.Name),
Value: proto.String(labelValues[i]),
})
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 7ae322590c8..386fb2d23e2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -72,6 +72,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@@ -91,6 +92,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
+ labels = constrainLabels(m.desc, labels)
h, err := m.hashLabels(labels)
if err != nil {
return false
@@ -106,6 +108,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
// Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+ labels = constrainLabels(m.desc, labels)
return m.metricMap.deleteByLabels(labels, m.curry)
}
@@ -145,10 +148,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
iCurry int
)
for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ val, ok := labels[label.Name]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok {
- return nil, fmt.Errorf("label name %q is already curried", label)
+ return nil, fmt.Errorf("label name %q is already curried", label.Name)
}
newCurry = append(newCurry, oldCurry[iCurry])
iCurry++
@@ -156,7 +159,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok {
continue // Label stays uncurried.
}
- newCurry = append(newCurry, curriedLabelValue{i, val})
+ newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)})
}
}
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@@ -199,6 +202,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
@@ -224,6 +228,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ labels = constrainLabels(m.desc, labels)
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@@ -266,16 +271,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
iCurry int
)
for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ val, ok := labels[label.Name]
if iCurry < len(curry) && curry[iCurry].index == i {
if ok {
- return 0, fmt.Errorf("label name %q is already curried", label)
+ return 0, fmt.Errorf("label name %q is already curried", label.Name)
}
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", label)
+ return 0, fmt.Errorf("label name %q missing in label map", label.Name)
}
h = m.hashAdd(h, val)
}
@@ -453,7 +458,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
for l, v := range labels {
// Check if the target label exists in our metrics and get the index.
- varLabelIndex, validLabel := indexOf(l, desc.variableLabels)
+ varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames())
if validLabel {
// Check the value of that label against the target value.
// We don't consider curried values in partial matches.
@@ -605,7 +610,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
iCurry++
continue
}
- if values[i] != labels[k] {
+ if values[i] != labels[k.Name] {
return false
}
}
@@ -621,7 +626,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []
iCurry++
continue
}
- labelValues[i] = labels[k]
+ labelValues[i] = labels[k.Name]
}
return labelValues
}
@@ -640,3 +645,34 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
}
return labelValues
}
+
+func constrainLabels(desc *Desc, labels Labels) Labels {
+ constrainedValues := make(Labels, len(labels))
+ for l, v := range labels {
+ if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
+ constrainedValues[l] = desc.variableLabels[i].Constrain(v)
+ continue
+ }
+ constrainedValues[l] = v
+ }
+ return constrainedValues
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+ constrainedValues := make([]string, len(lvs))
+ var iCurry, iLVs int
+ for i := 0; i < len(lvs)+len(curry); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ iCurry++
+ continue
+ }
+
+ if i < len(desc.variableLabels) {
+ constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs])
+ } else {
+ constrainedValues[iLVs] = lvs[iLVs]
+ }
+ iLVs++
+ }
+ return constrainedValues
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 00000000000..42bc3a8f066
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access the experimental API that might
+// be present in v2 of client_golang someday. It offers extended functionality
+// over v1 with a slightly changed API. It is acceptable to mix pieces from v1,
+// e.g. `prometheus.NewGauge`, with pieces from v2, e.g. `prometheus.V2.NewDesc`,
+// in the same codebase.
+var V2 = v2{}
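As a rough illustration of the v1/V2 mixing the comment above describes (names are illustrative, not part of the vendored diff):

	package mixed

	import "github.com/prometheus/client_golang/prometheus"

	var (
		// Stable v1 constructor.
		inFlight = prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "requests_in_flight",
			Help: "Requests currently being served.",
		})

		// Experimental V2 API from the same package, used side by side with v1.
		requestDesc = prometheus.V2.NewDesc(
			"requests_total",
			"Total requests.",
			prometheus.UnconstrainedLabels{"code"},
			nil,
		)
	)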
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 1498ee144cb..25da157f152 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -17,12 +17,10 @@ import (
"fmt"
"sort"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
+ "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus/internal"
+ "google.golang.org/protobuf/proto"
)
// WrapRegistererWith returns a Registerer wrapping the provided
@@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
constLabels[ln] = lv
}
// NewDesc will do remaining validations.
- newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+ newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
// Propagate errors if there were any. This will override any error
// created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil {
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index 35904ea1986..2b5bca4b999 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,25 +1,38 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.20.3
// source: io/prometheus/client/metrics.proto
package io_prometheus_client
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type MetricType int32
@@ -38,23 +51,25 @@ const (
MetricType_GAUGE_HISTOGRAM MetricType = 5
)
-var MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
- 5: "GAUGE_HISTOGRAM",
-}
-
-var MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
- "GAUGE_HISTOGRAM": 5,
-}
+// Enum value maps for MetricType.
+var (
+ MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+ 5: "GAUGE_HISTOGRAM",
+ }
+ MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+ "GAUGE_HISTOGRAM": 5,
+ }
+)
func (x MetricType) Enum() *MetricType {
p := new(MetricType)
@@ -63,449 +78,519 @@ func (x MetricType) Enum() *MetricType {
}
func (x MetricType) String() string {
- return proto.EnumName(MetricType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
-func (x *MetricType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+ return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricType) Type() protoreflect.EnumType {
+ return &file_io_prometheus_client_metrics_proto_enumTypes[0]
+}
+
+func (x MetricType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = MetricType(value)
+ *x = MetricType(num)
return nil
}
+// Deprecated: Use MetricType.Descriptor instead.
func (MetricType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{0}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
}
type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *LabelPair) Reset() { *m = LabelPair{} }
-func (m *LabelPair) String() string { return proto.CompactTextString(m) }
-func (*LabelPair) ProtoMessage() {}
-func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{0}
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
-func (m *LabelPair) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelPair.Unmarshal(m, b)
-}
-func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
-}
-func (m *LabelPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelPair.Merge(m, src)
+func (x *LabelPair) Reset() {
+ *x = LabelPair{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *LabelPair) XXX_Size() int {
- return xxx_messageInfo_LabelPair.Size(m)
+
+func (x *LabelPair) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *LabelPair) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelPair.DiscardUnknown(m)
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
-func (m *LabelPair) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *LabelPair) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *LabelPair) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *LabelPair) GetValue() string {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return ""
}
type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
-func (*Gauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{1}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Gauge.Unmarshal(m, b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Gauge.Merge(m, src)
+func (x *Gauge) Reset() {
+ *x = Gauge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Gauge) XXX_Size() int {
- return xxx_messageInfo_Gauge.Size(m)
+
+func (x *Gauge) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Gauge) XXX_DiscardUnknown() {
- xxx_messageInfo_Gauge.DiscardUnknown(m)
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
+}
-func (m *Gauge) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Gauge) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Counter) Reset() { *m = Counter{} }
-func (m *Counter) String() string { return proto.CompactTextString(m) }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{2}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Counter.Unmarshal(m, b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
+func (x *Counter) Reset() {
+ *x = Counter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Counter) XXX_Size() int {
- return xxx_messageInfo_Counter.Size(m)
+
+func (x *Counter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Counter proto.InternalMessageInfo
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
+func (*Counter) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
+}
-func (m *Counter) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Counter) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Counter) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Counter) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Quantile) Reset() { *m = Quantile{} }
-func (m *Quantile) String() string { return proto.CompactTextString(m) }
-func (*Quantile) ProtoMessage() {}
-func (*Quantile) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{3}
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
}
-func (m *Quantile) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Quantile.Unmarshal(m, b)
-}
-func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
-}
-func (m *Quantile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Quantile.Merge(m, src)
+func (x *Quantile) Reset() {
+ *x = Quantile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Quantile) XXX_Size() int {
- return xxx_messageInfo_Quantile.Size(m)
+
+func (x *Quantile) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Quantile) XXX_DiscardUnknown() {
- xxx_messageInfo_Quantile.DiscardUnknown(m)
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Quantile proto.InternalMessageInfo
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
+}
-func (m *Quantile) GetQuantile() float64 {
- if m != nil && m.Quantile != nil {
- return *m.Quantile
+func (x *Quantile) GetQuantile() float64 {
+ if x != nil && x.Quantile != nil {
+ return *x.Quantile
}
return 0
}
-func (m *Quantile) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Quantile) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
-func (*Summary) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{4}
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
}
-func (m *Summary) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Summary.Unmarshal(m, b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Summary.Merge(m, src)
+func (x *Summary) Reset() {
+ *x = Summary{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Summary) XXX_Size() int {
- return xxx_messageInfo_Summary.Size(m)
+
+func (x *Summary) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Summary) XXX_DiscardUnknown() {
- xxx_messageInfo_Summary.DiscardUnknown(m)
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Summary proto.InternalMessageInfo
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
+}
-func (m *Summary) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Summary) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Summary) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Summary) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Summary) GetQuantile() []*Quantile {
- if m != nil {
- return m.Quantile
+func (x *Summary) GetQuantile() []*Quantile {
+ if x != nil {
+ return x.Quantile
}
return nil
}
type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Untyped) Reset() { *m = Untyped{} }
-func (m *Untyped) String() string { return proto.CompactTextString(m) }
-func (*Untyped) ProtoMessage() {}
-func (*Untyped) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{5}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Untyped) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Untyped.Unmarshal(m, b)
-}
-func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
-}
-func (m *Untyped) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Untyped.Merge(m, src)
+func (x *Untyped) Reset() {
+ *x = Untyped{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Untyped) XXX_Size() int {
- return xxx_messageInfo_Untyped.Size(m)
+
+func (x *Untyped) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Untyped) XXX_DiscardUnknown() {
- xxx_messageInfo_Untyped.DiscardUnknown(m)
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Untyped proto.InternalMessageInfo
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
+}
-func (m *Untyped) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Untyped) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Histogram struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"`
+ SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
// Buckets for the conventional histogram.
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets.
// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
// In the future, more bucket schemas may be added using numbers < -4 or > 8.
Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
- ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`
- ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`
- ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"`
+ ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
+ ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket.
+ ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0.
// Negative buckets for the native histogram.
NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
// Use either "negative_delta" or "negative_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
- NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"`
- NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`
+ NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
// Positive buckets for the native histogram.
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
- PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"`
- PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
}
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
-func (*Histogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{6}
+func (x *Histogram) Reset() {
+ *x = Histogram{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Histogram.Unmarshal(m, b)
+func (x *Histogram) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
- return xxx_messageInfo_Histogram.Size(m)
-}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
+}
-func (m *Histogram) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Histogram) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Histogram) GetSampleCountFloat() float64 {
- if m != nil && m.SampleCountFloat != nil {
- return *m.SampleCountFloat
+func (x *Histogram) GetSampleCountFloat() float64 {
+ if x != nil && x.SampleCountFloat != nil {
+ return *x.SampleCountFloat
}
return 0
}
-func (m *Histogram) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Histogram) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Histogram) GetBucket() []*Bucket {
- if m != nil {
- return m.Bucket
+func (x *Histogram) GetBucket() []*Bucket {
+ if x != nil {
+ return x.Bucket
}
return nil
}
-func (m *Histogram) GetSchema() int32 {
- if m != nil && m.Schema != nil {
- return *m.Schema
+func (x *Histogram) GetSchema() int32 {
+ if x != nil && x.Schema != nil {
+ return *x.Schema
}
return 0
}
-func (m *Histogram) GetZeroThreshold() float64 {
- if m != nil && m.ZeroThreshold != nil {
- return *m.ZeroThreshold
+func (x *Histogram) GetZeroThreshold() float64 {
+ if x != nil && x.ZeroThreshold != nil {
+ return *x.ZeroThreshold
}
return 0
}
-func (m *Histogram) GetZeroCount() uint64 {
- if m != nil && m.ZeroCount != nil {
- return *m.ZeroCount
+func (x *Histogram) GetZeroCount() uint64 {
+ if x != nil && x.ZeroCount != nil {
+ return *x.ZeroCount
}
return 0
}
-func (m *Histogram) GetZeroCountFloat() float64 {
- if m != nil && m.ZeroCountFloat != nil {
- return *m.ZeroCountFloat
+func (x *Histogram) GetZeroCountFloat() float64 {
+ if x != nil && x.ZeroCountFloat != nil {
+ return *x.ZeroCountFloat
}
return 0
}
-func (m *Histogram) GetNegativeSpan() []*BucketSpan {
- if m != nil {
- return m.NegativeSpan
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+ if x != nil {
+ return x.NegativeSpan
}
return nil
}
-func (m *Histogram) GetNegativeDelta() []int64 {
- if m != nil {
- return m.NegativeDelta
+func (x *Histogram) GetNegativeDelta() []int64 {
+ if x != nil {
+ return x.NegativeDelta
}
return nil
}
-func (m *Histogram) GetNegativeCount() []float64 {
- if m != nil {
- return m.NegativeCount
+func (x *Histogram) GetNegativeCount() []float64 {
+ if x != nil {
+ return x.NegativeCount
}
return nil
}
-func (m *Histogram) GetPositiveSpan() []*BucketSpan {
- if m != nil {
- return m.PositiveSpan
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+ if x != nil {
+ return x.PositiveSpan
}
return nil
}
-func (m *Histogram) GetPositiveDelta() []int64 {
- if m != nil {
- return m.PositiveDelta
+func (x *Histogram) GetPositiveDelta() []int64 {
+ if x != nil {
+ return x.PositiveDelta
}
return nil
}
-func (m *Histogram) GetPositiveCount() []float64 {
- if m != nil {
- return m.PositiveCount
+func (x *Histogram) GetPositiveCount() []float64 {
+ if x != nil {
+ return x.PositiveCount
}
return nil
}
@@ -513,64 +598,72 @@ func (m *Histogram) GetPositiveCount() []float64 {
// A Bucket of a conventional histogram, each of which is treated as
// an individual counter-like time series by Prometheus.
type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
- CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order.
+ CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive.
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Bucket) Reset() { *m = Bucket{} }
-func (m *Bucket) String() string { return proto.CompactTextString(m) }
-func (*Bucket) ProtoMessage() {}
-func (*Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{7}
+func (x *Bucket) Reset() {
+ *x = Bucket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Bucket) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Bucket.Unmarshal(m, b)
-}
-func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
-}
-func (m *Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Bucket.Merge(m, src)
-}
-func (m *Bucket) XXX_Size() int {
- return xxx_messageInfo_Bucket.Size(m)
+func (x *Bucket) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_Bucket.DiscardUnknown(m)
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Bucket proto.InternalMessageInfo
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
+}
-func (m *Bucket) GetCumulativeCount() uint64 {
- if m != nil && m.CumulativeCount != nil {
- return *m.CumulativeCount
+func (x *Bucket) GetCumulativeCount() uint64 {
+ if x != nil && x.CumulativeCount != nil {
+ return *x.CumulativeCount
}
return 0
}
-func (m *Bucket) GetCumulativeCountFloat() float64 {
- if m != nil && m.CumulativeCountFloat != nil {
- return *m.CumulativeCountFloat
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+ if x != nil && x.CumulativeCountFloat != nil {
+ return *x.CumulativeCountFloat
}
return 0
}
-func (m *Bucket) GetUpperBound() float64 {
- if m != nil && m.UpperBound != nil {
- return *m.UpperBound
+func (x *Bucket) GetUpperBound() float64 {
+ if x != nil && x.UpperBound != nil {
+ return *x.UpperBound
}
return 0
}
-func (m *Bucket) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Bucket) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
@@ -582,333 +675,658 @@ func (m *Bucket) GetExemplar() *Exemplar {
// structured here (with all the buckets in a single array separate
// from the Spans).
type BucketSpan struct {
- Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"`
- Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *BucketSpan) Reset() { *m = BucketSpan{} }
-func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
-func (*BucketSpan) ProtoMessage() {}
-func (*BucketSpan) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{8}
+ Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+ Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets.
}
-func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BucketSpan.Unmarshal(m, b)
-}
-func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
-}
-func (m *BucketSpan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BucketSpan.Merge(m, src)
+func (x *BucketSpan) Reset() {
+ *x = BucketSpan{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *BucketSpan) XXX_Size() int {
- return xxx_messageInfo_BucketSpan.Size(m)
+
+func (x *BucketSpan) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *BucketSpan) XXX_DiscardUnknown() {
- xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
-func (m *BucketSpan) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
+func (x *BucketSpan) GetOffset() int32 {
+ if x != nil && x.Offset != nil {
+ return *x.Offset
}
return 0
}
-func (m *BucketSpan) GetLength() uint32 {
- if m != nil && m.Length != nil {
- return *m.Length
+func (x *BucketSpan) GetLength() uint32 {
+ if x != nil && x.Length != nil {
+ return *x.Length
}
return 0
}
type Exemplar struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{9}
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Exemplar.Unmarshal(m, b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
+func (x *Exemplar) Reset() {
+ *x = Exemplar{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Exemplar) XXX_Size() int {
- return xxx_messageInfo_Exemplar.Size(m)
+
+func (x *Exemplar) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
+}
-func (m *Exemplar) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Exemplar) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Exemplar) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Exemplar) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
}
return nil
}
type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{10}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+}
+
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metric.Unmarshal(m, b)
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Metric proto.InternalMessageInfo
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
+}
-func (m *Metric) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Metric) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Metric) GetGauge() *Gauge {
- if m != nil {
- return m.Gauge
+func (x *Metric) GetGauge() *Gauge {
+ if x != nil {
+ return x.Gauge
}
return nil
}
-func (m *Metric) GetCounter() *Counter {
- if m != nil {
- return m.Counter
+func (x *Metric) GetCounter() *Counter {
+ if x != nil {
+ return x.Counter
}
return nil
}
-func (m *Metric) GetSummary() *Summary {
- if m != nil {
- return m.Summary
+func (x *Metric) GetSummary() *Summary {
+ if x != nil {
+ return x.Summary
}
return nil
}
-func (m *Metric) GetUntyped() *Untyped {
- if m != nil {
- return m.Untyped
+func (x *Metric) GetUntyped() *Untyped {
+ if x != nil {
+ return x.Untyped
}
return nil
}
-func (m *Metric) GetHistogram() *Histogram {
- if m != nil {
- return m.Histogram
+func (x *Metric) GetHistogram() *Histogram {
+ if x != nil {
+ return x.Histogram
}
return nil
}
-func (m *Metric) GetTimestampMs() int64 {
- if m != nil && m.TimestampMs != nil {
- return *m.TimestampMs
+func (x *Metric) GetTimestampMs() int64 {
+ if x != nil && x.TimestampMs != nil {
+ return *x.TimestampMs
}
return 0
}
type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MetricFamily) Reset() { *m = MetricFamily{} }
-func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
-func (*MetricFamily) ProtoMessage() {}
-func (*MetricFamily) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{11}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+}
+
+func (x *MetricFamily) Reset() {
+ *x = MetricFamily{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
-}
-func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
-}
-func (m *MetricFamily) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricFamily.Merge(m, src)
-}
-func (m *MetricFamily) XXX_Size() int {
- return xxx_messageInfo_MetricFamily.Size(m)
+func (x *MetricFamily) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *MetricFamily) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
+}
-func (m *MetricFamily) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *MetricFamily) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *MetricFamily) GetHelp() string {
- if m != nil && m.Help != nil {
- return *m.Help
+func (x *MetricFamily) GetHelp() string {
+ if x != nil && x.Help != nil {
+ return *x.Help
}
return ""
}
-func (m *MetricFamily) GetType() MetricType {
- if m != nil && m.Type != nil {
- return *m.Type
+func (x *MetricFamily) GetType() MetricType {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
return MetricType_COUNTER
}
-func (m *MetricFamily) GetMetric() []*Metric {
- if m != nil {
- return m.Metric
+func (x *MetricFamily) GetMetric() []*Metric {
+ if x != nil {
+ return x.Metric
}
return nil
}
-func init() {
- proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
- proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
- proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
- proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
- proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
- proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
- proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
- proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
- proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
- proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
- proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
- proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
- proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
-}
-
-func init() {
- proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
-}
-
-var fileDescriptor_d1e5ddb18987a258 = []byte{
- // 896 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
- 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48,
- 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92,
- 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0,
- 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b,
- 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3,
- 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90,
- 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25,
- 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb,
- 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19,
- 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba,
- 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b,
- 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c,
- 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0,
- 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5,
- 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd,
- 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d,
- 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b,
- 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b,
- 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f,
- 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b,
- 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8,
- 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e,
- 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79,
- 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29,
- 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48,
- 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca,
- 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9,
- 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5,
- 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe,
- 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55,
- 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e,
- 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83,
- 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65,
- 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a,
- 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8,
- 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf,
- 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1,
- 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97,
- 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc,
- 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7,
- 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b,
- 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58,
- 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b,
- 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8,
- 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f,
- 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67,
- 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28,
- 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27,
- 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41,
- 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f,
- 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f,
- 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac,
- 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a,
- 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab,
- 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00,
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
+
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
+ 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d,
+ 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c,
+ 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a,
+ 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+ 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65,
+ 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74,
+ 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65,
+ 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72,
+ 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65,
+ 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
+ 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72,
+ 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f,
+ 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03,
+ 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74,
+ 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74,
+ 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61,
+ 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12,
+ 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74,
+ 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d,
+ 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01,
+ 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75,
+ 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76,
+ 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70,
+ 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a,
+ 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06,
+ 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61,
+ 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69,
+ 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38,
+ 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50,
+ 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61,
+ 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
+ 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a,
+ 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
+ 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
+ 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53,
+ 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12,
+ 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52,
+ 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74,
+ 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69,
+ 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68,
+ 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
+ 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a,
+ 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a,
+ 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41,
+ 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59,
+ 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12,
+ 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13,
+ 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41,
+ 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
+ 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f,
+ 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+ file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+ file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+ file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+ file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+ })
+ return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+ (MetricType)(0), // 0: io.prometheus.client.MetricType
+ (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair
+ (*Gauge)(nil), // 2: io.prometheus.client.Gauge
+ (*Counter)(nil), // 3: io.prometheus.client.Counter
+ (*Quantile)(nil), // 4: io.prometheus.client.Quantile
+ (*Summary)(nil), // 5: io.prometheus.client.Summary
+ (*Untyped)(nil), // 6: io.prometheus.client.Untyped
+ (*Histogram)(nil), // 7: io.prometheus.client.Histogram
+ (*Bucket)(nil), // 8: io.prometheus.client.Bucket
+ (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan
+ (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar
+ (*Metric)(nil), // 11: io.prometheus.client.Metric
+ (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily
+ (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+ 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+ 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+ 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+ 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+ 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+ 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+ 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+ 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+ 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+ 5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+ 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+ 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+ 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+ 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+ 16, // [16:16] is the sub-list for method output_type
+ 16, // [16:16] is the sub-list for method input_type
+ 16, // [16:16] is the sub-list for extension type_name
+ 16, // [16:16] is the sub-list for extension extendee
+ 0, // [0:16] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+ if File_io_prometheus_client_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LabelPair); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Gauge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Counter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Quantile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Summary); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Untyped); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Histogram); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bucket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BucketSpan); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exemplar); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricFamily); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_io_prometheus_client_metrics_proto_goTypes,
+ DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+ EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes,
+ MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes,
+ }.Build()
+ File_io_prometheus_client_metrics_proto = out.File
+ file_io_prometheus_client_metrics_proto_rawDesc = nil
+ file_io_prometheus_client_metrics_proto_goTypes = nil
+ file_io_prometheus_client_metrics_proto_depIdxs = nil
}
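
Note on the regenerated io.prometheus.client bindings above: the switch from the legacy github.com/golang/protobuf runtime to google.golang.org/protobuf (protoimpl/protoreflect) drops the XXX_* bookkeeping fields, but the nil-safe getter contract is unchanged, so callers that read fields through getters need no changes. A minimal sketch of that contract, assuming the usual dto alias for the vendored client_model package:

    import (
        "fmt"

        dto "github.com/prometheus/client_model/go"
    )

    func getterSketch() {
        var mf *dto.MetricFamily        // getters are safe even on a nil message
        fmt.Println(mf.GetName())       // ""
        fmt.Println(mf.GetType())       // COUNTER (the zero enum value)

        name, typ := "http_requests_total", dto.MetricType_COUNTER
        mf = &dto.MetricFamily{Name: &name, Type: &typ}
        fmt.Println(mf.GetName(), mf.GetType())
    }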
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 7657f841d63..f4fc8845522 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -115,32 +115,28 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
- p TextParser
- fams []*dto.MetricFamily
+ fams map[string]*dto.MetricFamily
+ err error
}
// Decode implements the Decoder interface.
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- // TODO(fabxc): Wrap this as a line reader to make streaming safer.
- if len(d.fams) == 0 {
- // No cached metric families, read everything and parse metrics.
- fams, err := d.p.TextToMetricFamilies(d.r)
- if err != nil {
- return err
- }
- if len(fams) == 0 {
- return io.EOF
- }
- d.fams = make([]*dto.MetricFamily, 0, len(fams))
- for _, f := range fams {
- d.fams = append(d.fams, f)
+ if d.err == nil {
+ // Read all metrics in one shot.
+ var p TextParser
+ d.fams, d.err = p.TextToMetricFamilies(d.r)
+ // If we don't get an error, store io.EOF for the end.
+ if d.err == nil {
+ d.err = io.EOF
}
}
-
- *v = *d.fams[0]
- d.fams = d.fams[1:]
-
- return nil
+ // Pick off one MetricFamily per Decode until there's nothing left.
+ for key, fam := range d.fams {
+ *v = *fam
+ delete(d.fams, key)
+ return nil
+ }
+ return d.err
}
// SampleDecoder wraps a Decoder to extract samples from the metric families
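
For callers, the textDecoder rewrite above changes two observable things: once every parsed family has been handed out, Decode returns the stored error (io.EOF after a clean parse), and families now come back in map-iteration order, so ordering is no longer deterministic. A hedged usage sketch (readFamilies and its reader argument are placeholders, not part of the package):

    import (
        "io"

        dto "github.com/prometheus/client_model/go"
        "github.com/prometheus/common/expfmt"
    )

    func readFamilies(r io.Reader) ([]*dto.MetricFamily, error) {
        dec := expfmt.NewDecoder(r, expfmt.FmtText)
        var out []*dto.MetricFamily
        for {
            mf := &dto.MetricFamily{}
            if err := dec.Decode(mf); err != nil {
                if err == io.EOF {
                    return out, nil // clean end: all parsed families returned
                }
                return out, err // parse/read error, surfaced after the parsed families
            }
            out = append(out, mf) // map iteration order, not input order
        }
    }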
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
index f819e4f8b54..dfac962a4e7 100644
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -21,8 +21,8 @@ import "bytes"
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 9d94ae9effe..21cdddcf054 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -46,20 +46,20 @@ import (
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
-// - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
-// line. A counter with a missing `_total` suffix is not an error. However,
-// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-// output.
+// - Counters are expected to have the `_total` suffix in their metric name. In
+// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
+// line. A counter with a missing `_total` suffix is not an error. However,
+// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+// output.
//
-// - No support for the following (optional) features: `# UNIT` line, `_created`
-// line, info type, stateset type, gaugehistogram type.
+// - No support for the following (optional) features: `# UNIT` line, `_created`
+// line, info type, stateset type, gaugehistogram type.
//
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-// exemplars that are larger than allowed by the OpenMetrics specification).
+// - The size of exemplar labels is not checked (i.e. it's possible to create
+// exemplars that are larger than allowed by the OpenMetrics specification).
//
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-// with a `NaN` value.)
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+// with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
name := in.GetName()
if name == "" {
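
The bullet-list reflow in the doc comment above is gofmt (Go 1.19+) comment normalization only; the converter's behavior is unchanged. For reference, a minimal hedged sketch of calling it (mf stands in for any *dto.MetricFamily you already hold):

    var buf bytes.Buffer
    n, err := expfmt.MetricFamilyToOpenMetrics(&buf, mf)
    if err != nil {
        return err
    }
    // n bytes of OpenMetrics text were written; a complete exposition is
    // normally closed with expfmt.FinalizeOpenMetrics(&buf), which appends
    // the required "# EOF" marker.
    _ = n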
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 5ba503b0654..2946b8f1a64 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -17,7 +17,6 @@ import (
"bufio"
"fmt"
"io"
- "io/ioutil"
"math"
"strconv"
"strings"
@@ -44,7 +43,7 @@ const (
var (
bufPool = sync.Pool{
New: func() interface{} {
- return bufio.NewWriter(ioutil.Discard)
+ return bufio.NewWriter(io.Discard)
},
}
numBufPool = sync.Pool{
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 84be0643ec6..ac2482782c7 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) {
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
if p.skipBlankTab(); p.err != nil {
- // End of input reached. This is the only case where
- // that is not an error but a signal that we are done.
- p.err = nil
+ // This is the only place that we expect to see io.EOF,
+ // which is not an error but the signal that we are done.
+ // Any other error that happens to align with the start of
+ // a line is still an error.
+ if p.err == io.EOF {
+ p.err = nil
+ }
return nil
}
switch p.currentByte {
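
The startOfLine change above narrows the "swallow the error" path to io.EOF only; any other read error that happens to surface at the start of a line now propagates to the caller. A hedged sketch of the call site this affects (parseText and its input string are illustrative only):

    func parseText(input string) error {
        var parser expfmt.TextParser
        fams, err := parser.TextToMetricFamilies(strings.NewReader(input))
        if err != nil {
            // A clean end of input is still not an error, but a non-EOF read
            // error landing on a line boundary is now reported, not dropped.
            return err
        }
        for name := range fams {
            fmt.Println(name)
        }
        return nil
    }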
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
index 26e92288c7c..a21b9d15dd8 100644
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
*/
package goautoneg
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index c909b8aa8c5..5727452c1ee 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"math"
- "regexp"
"strconv"
"strings"
"time"
@@ -183,54 +182,78 @@ func (d *Duration) Type() string {
return "duration"
}
-var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+ pos int
+ mult uint64
+}{
+ "ms": {7, uint64(time.Millisecond)},
+ "s": {6, uint64(time.Second)},
+ "m": {5, uint64(time.Minute)},
+ "h": {4, uint64(time.Hour)},
+ "d": {3, uint64(24 * time.Hour)},
+ "w": {2, uint64(7 * 24 * time.Hour)},
+ "y": {1, uint64(365 * 24 * time.Hour)},
+}
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
- switch durationStr {
+func ParseDuration(s string) (Duration, error) {
+ switch s {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
return 0, errors.New("empty duration string")
}
- matches := durationRE.FindStringSubmatch(durationStr)
- if matches == nil {
- return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
- }
- var dur time.Duration
- // Parse the match at pos `pos` in the regex and use `mult` to turn that
- // into ms, then add that value to the total parsed duration.
- var overflowErr error
- m := func(pos int, mult time.Duration) {
- if matches[pos] == "" {
- return
+ orig := s
+ var dur uint64
+ lastUnitPos := 0
+
+ for s != "" {
+ if !isdigit(s[0]) {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ // Consume [0-9]*
+ i := 0
+ for ; i < len(s) && isdigit(s[i]); i++ {
+ }
+ v, err := strconv.ParseUint(s[:i], 10, 0)
+ if err != nil {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
- n, _ := strconv.Atoi(matches[pos])
+ s = s[i:]
+ // Consume unit.
+ for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+ }
+ if i == 0 {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ u := s[:i]
+ s = s[i:]
+ unit, ok := unitMap[u]
+ if !ok {
+ return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+ }
+ if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ lastUnitPos = unit.pos
// Check if the provided duration overflows time.Duration (> ~ 290years).
- if n > int((1<<63-1)/mult/time.Millisecond) {
- overflowErr = errors.New("duration out of range")
+ if v > 1<<63/unit.mult {
+ return 0, errors.New("duration out of range")
}
- d := time.Duration(n) * time.Millisecond
- dur += d * mult
-
- if dur < 0 {
- overflowErr = errors.New("duration out of range")
+ dur += v * unit.mult
+ if dur > 1<<63-1 {
+ return 0, errors.New("duration out of range")
}
}
-
- m(2, 1000*60*60*24*365) // y
- m(4, 1000*60*60*24*7) // w
- m(6, 1000*60*60*24) // d
- m(8, 1000*60*60) // h
- m(10, 1000*60) // m
- m(12, 1000) // s
- m(14, 1) // ms
-
- return Duration(dur), overflowErr
+ return Duration(dur), nil
}
func (d Duration) String() string {
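
The hand-rolled parser above replaces the regexp while keeping the grammar: a sequence of integer+unit pairs, units ordered from largest to smallest, each unit at most once, and overflow now rejected up front instead of being detected after the fact. A few hedged examples of the resulting behavior:

    d, err := model.ParseDuration("1h30m") // ok: units ordered big -> small
    fmt.Println(d, err)                    // 1h30m <nil>

    _, err = model.ParseDuration("30m1h")  // out of order -> "not a valid duration string"
    fmt.Println(err)

    _, err = model.ParseDuration("293y")   // beyond ~292y overflows time.Duration
    fmt.Println(err)                       // "duration out of range"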
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index c9d8fb1a283..9eb440413fd 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -16,20 +16,12 @@ package model
import (
"encoding/json"
"fmt"
- "math"
"sort"
"strconv"
"strings"
)
var (
- // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
- // non-existing sample pair. It is a SamplePair with timestamp Earliest and
- // value 0.0. Note that the natural zero value of SamplePair has a timestamp
- // of 0, which is possible to appear in a real SamplePair and thus not
- // suitable to signal a non-existing SamplePair.
- ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
@@ -38,82 +30,14 @@ var (
ZeroSample = Sample{Timestamp: Earliest}
)
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
-// Equal returns true if the value of v and o is equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
-
-// Sample is a sample pair associated with a metric.
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. Histogram == nil implies the Value
+// field is used, otherwise it should be ignored.
type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+ Histogram *SampleHistogram `json:"histogram"`
}
// Equal compares first the metrics, then the timestamp, then the value. The
@@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
-
+ if s.Histogram != nil {
+ return s.Histogram.Equal(o.Histogram)
+ }
return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
+ if s.Histogram != nil {
+ return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ })
+ }
return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
@@ -142,6 +74,19 @@ func (s Sample) String() string {
// MarshalJSON implements json.Marshaler.
func (s Sample) MarshalJSON() ([]byte, error) {
+ if s.Histogram != nil {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histogram SampleHistogramPair `json:"histogram"`
+ }{
+ Metric: s.Metric,
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
+ }
+ return json.Marshal(&v)
+ }
v := struct {
Metric Metric `json:"metric"`
Value SamplePair `json:"value"`
@@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) {
Value: s.Value,
},
}
-
return json.Marshal(&v)
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *Sample) UnmarshalJSON(b []byte) error {
v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ Histogram SampleHistogramPair `json:"histogram"`
}{
Metric: s.Metric,
Value: SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
},
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
}
if err := json.Unmarshal(b, &v); err != nil {
@@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error {
}
s.Metric = v.Metric
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
+ if v.Histogram.Histogram != nil {
+ s.Timestamp = v.Histogram.Timestamp
+ s.Histogram = v.Histogram.Histogram
+ } else {
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+ }
return nil
}
@@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool {
// SampleStream is a stream of Values belonging to an attached COWMetric.
type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
}
func (ss SampleStream) String() string {
- vals := make([]string, len(ss.Values))
+ valuesLength := len(ss.Values)
+ vals := make([]string, valuesLength+len(ss.Histograms))
for i, v := range ss.Values {
vals[i] = v.String()
}
+ for i, v := range ss.Histograms {
+ vals[i+valuesLength] = v.String()
+ }
return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
}
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+ if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else if len(ss.Histograms) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ }
+ return json.Marshal(&v)
+ }
}
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
-
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- switch s {
- case "":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return ""
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
+ ss.Metric = v.Metric
+ ss.Values = v.Values
+ ss.Histograms = v.Histograms
+
+ return nil
}
// Scalar is a scalar value evaluated at the set timestamp.
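A minimal, hand-written sketch of how the histogram-aware Sample now marshals, using only the types shown in this hunk (Sample, SampleHistogram, HistogramBucket); the Metric literals assume the package's usual label-map type. A float sample keeps the old {"metric","value"} shape, while a native-histogram sample produces {"metric","histogram"} instead:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        // A float sample keeps the old {"metric":...,"value":[ts,"1"]} shape.
        floatSample := model.Sample{
            Metric:    model.Metric{"__name__": "up"},
            Value:     1,
            Timestamp: 1000,
        }
        // A native-histogram sample sets Histogram instead of Value and
        // marshals to {"metric":...,"histogram":[ts,{...}]}.
        histSample := model.Sample{
            Metric:    model.Metric{"__name__": "request_duration"},
            Timestamp: 1000,
            Histogram: &model.SampleHistogram{
                Count: 2,
                Sum:   3.5,
                Buckets: model.HistogramBuckets{
                    {Boundaries: 0, Lower: 0, Upper: 1, Count: 1},
                    {Boundaries: 0, Lower: 1, Upper: 2.5, Count: 1},
                },
            },
        }
        for _, s := range []model.Sample{floatSample, histSample} {
            b, err := json.Marshal(s)
            if err != nil {
                panic(err)
            }
            fmt.Println(string(b))
        }
    }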
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 00000000000..0f615a70530
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,100 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+)
+
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+ // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+ // of 0, which is possible to appear in a real SamplePair and thus not
+ // suitable to signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 00000000000..54bb038cfff
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,178 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type FloatString float64
+
+func (v FloatString) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("float value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = FloatString(f)
+ return nil
+}
+
+type HistogramBucket struct {
+ Boundaries int32
+ Lower FloatString
+ Upper FloatString
+ Count FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+ b, err := json.Marshal(s.Boundaries)
+ if err != nil {
+ return nil, err
+ }
+ l, err := json.Marshal(s.Lower)
+ if err != nil {
+ return nil, err
+ }
+ u, err := json.Marshal(s.Upper)
+ if err != nil {
+ return nil, err
+ }
+ c, err := json.Marshal(s.Count)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+ return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (b HistogramBucket) String() string {
+ var sb strings.Builder
+ lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
+ upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
+ if lowerInclusive {
+ sb.WriteRune('[')
+ } else {
+ sb.WriteRune('(')
+ }
+ fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+ if upperInclusive {
+ sb.WriteRune(']')
+ } else {
+ sb.WriteRune(')')
+ }
+ fmt.Fprintf(&sb, ":%v", b.Count)
+ return sb.String()
+}
+
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, bucket := range s {
+ if !bucket.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+type SampleHistogram struct {
+ Count FloatString `json:"count"`
+ Sum FloatString `json:"sum"`
+ Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+ return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+ return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+type SampleHistogramPair struct {
+ Timestamp Time
+ // Histogram should never be nil; it is stored as a pointer only for efficiency.
+ Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+ if s.Histogram == nil {
+ return nil, fmt.Errorf("histogram is nil")
+ }
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Histogram)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Timestamp, &s.Histogram}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ if s.Histogram == nil {
+ return fmt.Errorf("histogram is null")
+ }
+ return nil
+}
+
+func (s SampleHistogramPair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+ return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
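The wire format implemented by SampleHistogramPair above is a two-element array of [timestamp, histogram], with count, sum, and bucket bounds encoded as quoted strings (FloatString). A small round-trip sketch:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        raw := `[1234.567,{"count":"2","sum":"3.5","buckets":[[0,"0","1","1"],[0,"1","2.5","1"]]}]`
        var p model.SampleHistogramPair
        if err := json.Unmarshal([]byte(raw), &p); err != nil {
            panic(err)
        }
        // Prints the histogram followed by its timestamp, per String() above.
        fmt.Println(p)

        // Marshaling produces the same [timestamp, histogram] shape.
        out, err := json.Marshal(p)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }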
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 00000000000..726c50ee638
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
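ValueType round-trips through JSON as its lowercase string form, exactly as the switch statements above define; for example:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        var vt model.ValueType
        if err := json.Unmarshal([]byte(`"matrix"`), &vt); err != nil {
            panic(err)
        }
        fmt.Println(vt == model.ValMatrix) // true

        b, _ := json.Marshal(model.ValVector)
        fmt.Println(string(b)) // "vector"
    }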
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 6c8e3e21979..e358db69c5d 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),)
endif
endif
-PROMU_VERSION ?= 0.13.0
+PROMU_VERSION ?= 0.14.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.45.2
+GOLANGCI_LINT_VERSION ?= v1.49.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
- ifeq (,$(CIRCLE_JOB))
+ ifneq (,$(SKIP_GOLANGCI_LINT))
+ GOLANGCI_LINT :=
+ else ifeq (,$(CIRCLE_JOB))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index ff6b927da15..06968ca2ed4 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
return cpuinfo, nil
}
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ // find the first "system type" line
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+ return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ systemType := field[1]
+ i := 0
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ i = int(v)
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ cpuinfo[i].Processor = uint(v)
+ cpuinfo[i].VendorID = systemType
+ case "CPU Family":
+ cpuinfo[i].CPUFamily = field[1]
+ case "Model Name":
+ cpuinfo[i].ModelName = field[1]
+ }
+ }
+ return cpuinfo, nil
+}
+
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 00000000000..d88442f0edf
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
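With the build tag above, loong64 hosts pick up parseCPUInfoLoong automatically. A sketch of reading the result through the package's usual filesystem accessor; NewDefaultFS and FS.CPUInfo are assumed here, as neither appears in this hunk:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewDefaultFS()
        if err != nil {
            log.Fatal(err)
        }
        infos, err := fs.CPUInfo()
        if err != nil {
            log.Fatal(err)
        }
        for _, c := range infos {
            // On loong64, VendorID carries the "system type" value parsed above.
            fmt.Println(c.Processor, c.VendorID, c.CPUFamily, c.ModelName)
        }
    }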
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
index ea41bf2ca1e..a6b2b3127cb 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
-// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
index d31a82600f6..f9d961e4417 100644
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -16,30 +16,29 @@
//
// Example:
//
-// package main
-//
-// import (
-// "fmt"
-// "log"
-//
-// "github.com/prometheus/procfs"
-// )
-//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
-//
-// stat, err := p.Stat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
-//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.Stat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
package procfs
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index f7a828bb1da..0c482c18ccf 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
-// device [device] mounted on [mount] with fstype [type]
+//
+// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss)
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index a94f86dc4ae..06b7b8f2163 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -27,8 +27,9 @@ import (
// For the proc file format details,
// See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
type SoftnetStat struct {
@@ -38,6 +39,18 @@ type SoftnetStat struct {
Dropped uint32
// Number of times processing packets ran out of quota.
TimeSqueezed uint32
+ // Number of collisions that occurred while obtaining the device lock while transmitting.
+ CPUCollision uint32
+ // Number of times the CPU has been woken up to process packets via received_rps.
+ ReceivedRps uint32
+ // Number of times the flow limit has been reached.
+ FlowLimitCount uint32
+ // Softnet backlog status.
+ SoftnetBacklogLen uint32
+ // ID of the CPU owning this softnet_data.
+ Index uint32
+ // Width of this softnet_data row, i.e. the number of columns parsed.
+ Width int
}
var softNetProcFile = "net/softnet_stat"
@@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
+ softnetStat := SoftnetStat{}
if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
}
- // We only parse the first three columns at the moment.
- us, err := parseHexUint32s(columns[0:3])
- if err != nil {
- return nil, err
+ // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+ if width >= minColumns {
+ us, err := parseHexUint32s(columns[0:9])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.Processed = us[0]
+ softnetStat.Dropped = us[1]
+ softnetStat.TimeSqueezed = us[2]
+ softnetStat.CPUCollision = us[8]
+ }
+
+ // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+ if width >= 10 {
+ us, err := parseHexUint32s(columns[9:10])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.ReceivedRps = us[0]
}
- stats = append(stats, SoftnetStat{
- Processed: us[0],
- Dropped: us[1],
- TimeSqueezed: us[2],
- })
+ // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+ if width >= 11 {
+ us, err := parseHexUint32s(columns[10:11])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.FlowLimitCount = us[0]
+ }
+
+ // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+ if width >= 13 {
+ us, err := parseHexUint32s(columns[11:13])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.SoftnetBacklogLen = us[0]
+ softnetStat.Index = us[1]
+ }
+ softnetStat.Width = width
+ stats = append(stats, softnetStat)
}
return stats, nil
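The extra columns are only filled in when the kernel actually emits them, so Width is the reliable way to tell which fields are meaningful. A sketch, assuming the package's usual FS.NetSoftnetStat accessor (not shown in this hunk):

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewDefaultFS()
        if err != nil {
            log.Fatal(err)
        }
        stats, err := fs.NetSoftnetStat()
        if err != nil {
            log.Fatal(err)
        }
        for _, s := range stats {
            fmt.Printf("cpu row width=%d processed=%d dropped=%d\n", s.Width, s.Processed, s.Dropped)
            if s.Width >= 13 {
                // Backlog length and owning CPU index are emitted only on Linux >= 5.14.
                fmt.Printf("  backlog=%d index=%d\n", s.SoftnetBacklogLen, s.Index)
            }
        }
    }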
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
index dcea9c5a671..5cc40aef55b 100644
--- a/vendor/github.com/prometheus/procfs/netstat.go
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -15,6 +15,7 @@ package procfs
import (
"bufio"
+ "io"
"os"
"path/filepath"
"strconv"
@@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) {
return nil, err
}
- netStatFile := NetStat{
- Filename: filepath.Base(filePath),
- Stats: make(map[string][]uint64),
+ procNetstat, err := parseNetstat(file)
+ if err != nil {
+ return nil, err
+ }
+ procNetstat.Filename = filepath.Base(filePath)
+
+ netStatsTotal = append(netStatsTotal, procNetstat)
+ }
+ return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from a `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(r io.Reader) (NetStat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ netStat = NetStat{
+ Stats: make(map[string][]uint64),
}
- scanner := bufio.NewScanner(file)
- scanner.Scan()
- // First string is always a header for stats
- var headers []string
- headers = append(headers, strings.Fields(scanner.Text())...)
+ )
+
+ scanner.Scan()
- // Other strings represent per-CPU counters
- for scanner.Scan() {
- for num, counter := range strings.Fields(scanner.Text()) {
- value, err := strconv.ParseUint(counter, 16, 64)
- if err != nil {
- return nil, err
- }
- netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
+ // First string is always a header for stats
+ var headers []string
+ headers = append(headers, strings.Fields(scanner.Text())...)
+
+ // Other strings represent per-CPU counters
+ for scanner.Scan() {
+ for num, counter := range strings.Fields(scanner.Text()) {
+ value, err := strconv.ParseUint(counter, 16, 64)
+ if err != nil {
+ return NetStat{}, err
}
+ netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
}
- netStatsTotal = append(netStatsTotal, netStatFile)
}
- return netStatsTotal, nil
+
+ return netStat, nil
}
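FS.NetStat (shown in the hunk header above) now delegates to parseNetstat; each returned NetStat keeps the source filename plus a map from header name to per-CPU counters. A small usage sketch (NewDefaultFS is assumed here):

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewDefaultFS()
        if err != nil {
            log.Fatal(err)
        }
        stats, err := fs.NetStat()
        if err != nil {
            log.Fatal(err)
        }
        for _, ns := range stats {
            for header, perCPU := range ns.Stats {
                fmt.Printf("%s %s: %v\n", ns.Filename, header, perCPU)
            }
        }
    }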
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index cca03327c3f..ea83a75ffc4 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -23,7 +23,7 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 00000000000..9df79c23799
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+ // Info is the type of interrupt.
+ Info string
+ // Devices is the name of the device that is located at that IRQ.
+ Devices string
+ // Values is the number of interrupts per CPU.
+ Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+ data, err := util.ReadFileNoStat(p.path("interrupts"))
+ if err != nil {
+ return nil, err
+ }
+ return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+ var (
+ interrupts = Interrupts{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return nil, errors.New("interrupts empty")
+ }
+ cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ if len(parts) == 0 { // skip empty lines
+ continue
+ }
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts)
+ }
+ intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+ if len(parts) == 2 {
+ interrupts[intName] = Interrupt{
+ Info: "",
+ Devices: "",
+ Values: []string{
+ parts[1],
+ },
+ }
+ continue
+ }
+
+ intr := Interrupt{
+ Values: parts[1 : cpuNum+1],
+ }
+
+ if _, err := strconv.Atoi(intName); err == nil { // numeric interrupt
+ intr.Info = parts[cpuNum+1]
+ intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+ } else {
+ intr.Info = strings.Join(parts[cpuNum+1:], " ")
+ }
+ interrupts[intName] = intr
+ }
+
+ return interrupts, scanner.Err()
+}
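The new Proc.Interrupts accessor keys the map by IRQ name and keeps the per-CPU counters as strings; a minimal sketch in the style of the doc.go example elsewhere in this change:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        p, err := procfs.Self()
        if err != nil {
            log.Fatalf("could not get process: %s", err)
        }
        interrupts, err := p.Interrupts()
        if err != nil {
            log.Fatalf("could not read interrupts: %s", err)
        }
        for irq, in := range interrupts {
            fmt.Printf("%s (%s, %s): %v\n", irq, in.Info, in.Devices, in.Values)
        }
    }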
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
index 48b5238194e..6a43bb24595 100644
--- a/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -33,139 +33,140 @@ type ProcNetstat struct {
}
type TcpExt struct { // nolint:revive
- SyncookiesSent float64
- SyncookiesRecv float64
- SyncookiesFailed float64
- EmbryonicRsts float64
- PruneCalled float64
- RcvPruned float64
- OfoPruned float64
- OutOfWindowIcmps float64
- LockDroppedIcmps float64
- ArpFilter float64
- TW float64
- TWRecycled float64
- TWKilled float64
- PAWSActive float64
- PAWSEstab float64
- DelayedACKs float64
- DelayedACKLocked float64
- DelayedACKLost float64
- ListenOverflows float64
- ListenDrops float64
- TCPHPHits float64
- TCPPureAcks float64
- TCPHPAcks float64
- TCPRenoRecovery float64
- TCPSackRecovery float64
- TCPSACKReneging float64
- TCPSACKReorder float64
- TCPRenoReorder float64
- TCPTSReorder float64
- TCPFullUndo float64
- TCPPartialUndo float64
- TCPDSACKUndo float64
- TCPLossUndo float64
- TCPLostRetransmit float64
- TCPRenoFailures float64
- TCPSackFailures float64
- TCPLossFailures float64
- TCPFastRetrans float64
- TCPSlowStartRetrans float64
- TCPTimeouts float64
- TCPLossProbes float64
- TCPLossProbeRecovery float64
- TCPRenoRecoveryFail float64
- TCPSackRecoveryFail float64
- TCPRcvCollapsed float64
- TCPDSACKOldSent float64
- TCPDSACKOfoSent float64
- TCPDSACKRecv float64
- TCPDSACKOfoRecv float64
- TCPAbortOnData float64
- TCPAbortOnClose float64
- TCPAbortOnMemory float64
- TCPAbortOnTimeout float64
- TCPAbortOnLinger float64
- TCPAbortFailed float64
- TCPMemoryPressures float64
- TCPMemoryPressuresChrono float64
- TCPSACKDiscard float64
- TCPDSACKIgnoredOld float64
- TCPDSACKIgnoredNoUndo float64
- TCPSpuriousRTOs float64
- TCPMD5NotFound float64
- TCPMD5Unexpected float64
- TCPMD5Failure float64
- TCPSackShifted float64
- TCPSackMerged float64
- TCPSackShiftFallback float64
- TCPBacklogDrop float64
- PFMemallocDrop float64
- TCPMinTTLDrop float64
- TCPDeferAcceptDrop float64
- IPReversePathFilter float64
- TCPTimeWaitOverflow float64
- TCPReqQFullDoCookies float64
- TCPReqQFullDrop float64
- TCPRetransFail float64
- TCPRcvCoalesce float64
- TCPOFOQueue float64
- TCPOFODrop float64
- TCPOFOMerge float64
- TCPChallengeACK float64
- TCPSYNChallenge float64
- TCPFastOpenActive float64
- TCPFastOpenActiveFail float64
- TCPFastOpenPassive float64
- TCPFastOpenPassiveFail float64
- TCPFastOpenListenOverflow float64
- TCPFastOpenCookieReqd float64
- TCPFastOpenBlackhole float64
- TCPSpuriousRtxHostQueues float64
- BusyPollRxPackets float64
- TCPAutoCorking float64
- TCPFromZeroWindowAdv float64
- TCPToZeroWindowAdv float64
- TCPWantZeroWindowAdv float64
- TCPSynRetrans float64
- TCPOrigDataSent float64
- TCPHystartTrainDetect float64
- TCPHystartTrainCwnd float64
- TCPHystartDelayDetect float64
- TCPHystartDelayCwnd float64
- TCPACKSkippedSynRecv float64
- TCPACKSkippedPAWS float64
- TCPACKSkippedSeq float64
- TCPACKSkippedFinWait2 float64
- TCPACKSkippedTimeWait float64
- TCPACKSkippedChallenge float64
- TCPWinProbe float64
- TCPKeepAlive float64
- TCPMTUPFail float64
- TCPMTUPSuccess float64
- TCPWqueueTooBig float64
+ SyncookiesSent *float64
+ SyncookiesRecv *float64
+ SyncookiesFailed *float64
+ EmbryonicRsts *float64
+ PruneCalled *float64
+ RcvPruned *float64
+ OfoPruned *float64
+ OutOfWindowIcmps *float64
+ LockDroppedIcmps *float64
+ ArpFilter *float64
+ TW *float64
+ TWRecycled *float64
+ TWKilled *float64
+ PAWSActive *float64
+ PAWSEstab *float64
+ DelayedACKs *float64
+ DelayedACKLocked *float64
+ DelayedACKLost *float64
+ ListenOverflows *float64
+ ListenDrops *float64
+ TCPHPHits *float64
+ TCPPureAcks *float64
+ TCPHPAcks *float64
+ TCPRenoRecovery *float64
+ TCPSackRecovery *float64
+ TCPSACKReneging *float64
+ TCPSACKReorder *float64
+ TCPRenoReorder *float64
+ TCPTSReorder *float64
+ TCPFullUndo *float64
+ TCPPartialUndo *float64
+ TCPDSACKUndo *float64
+ TCPLossUndo *float64
+ TCPLostRetransmit *float64
+ TCPRenoFailures *float64
+ TCPSackFailures *float64
+ TCPLossFailures *float64
+ TCPFastRetrans *float64
+ TCPSlowStartRetrans *float64
+ TCPTimeouts *float64
+ TCPLossProbes *float64
+ TCPLossProbeRecovery *float64
+ TCPRenoRecoveryFail *float64
+ TCPSackRecoveryFail *float64
+ TCPRcvCollapsed *float64
+ TCPDSACKOldSent *float64
+ TCPDSACKOfoSent *float64
+ TCPDSACKRecv *float64
+ TCPDSACKOfoRecv *float64
+ TCPAbortOnData *float64
+ TCPAbortOnClose *float64
+ TCPAbortOnMemory *float64
+ TCPAbortOnTimeout *float64
+ TCPAbortOnLinger *float64
+ TCPAbortFailed *float64
+ TCPMemoryPressures *float64
+ TCPMemoryPressuresChrono *float64
+ TCPSACKDiscard *float64
+ TCPDSACKIgnoredOld *float64
+ TCPDSACKIgnoredNoUndo *float64
+ TCPSpuriousRTOs *float64
+ TCPMD5NotFound *float64
+ TCPMD5Unexpected *float64
+ TCPMD5Failure *float64
+ TCPSackShifted *float64
+ TCPSackMerged *float64
+ TCPSackShiftFallback *float64
+ TCPBacklogDrop *float64
+ PFMemallocDrop *float64
+ TCPMinTTLDrop *float64
+ TCPDeferAcceptDrop *float64
+ IPReversePathFilter *float64
+ TCPTimeWaitOverflow *float64
+ TCPReqQFullDoCookies *float64
+ TCPReqQFullDrop *float64
+ TCPRetransFail *float64
+ TCPRcvCoalesce *float64
+ TCPRcvQDrop *float64
+ TCPOFOQueue *float64
+ TCPOFODrop *float64
+ TCPOFOMerge *float64
+ TCPChallengeACK *float64
+ TCPSYNChallenge *float64
+ TCPFastOpenActive *float64
+ TCPFastOpenActiveFail *float64
+ TCPFastOpenPassive *float64
+ TCPFastOpenPassiveFail *float64
+ TCPFastOpenListenOverflow *float64
+ TCPFastOpenCookieReqd *float64
+ TCPFastOpenBlackhole *float64
+ TCPSpuriousRtxHostQueues *float64
+ BusyPollRxPackets *float64
+ TCPAutoCorking *float64
+ TCPFromZeroWindowAdv *float64
+ TCPToZeroWindowAdv *float64
+ TCPWantZeroWindowAdv *float64
+ TCPSynRetrans *float64
+ TCPOrigDataSent *float64
+ TCPHystartTrainDetect *float64
+ TCPHystartTrainCwnd *float64
+ TCPHystartDelayDetect *float64
+ TCPHystartDelayCwnd *float64
+ TCPACKSkippedSynRecv *float64
+ TCPACKSkippedPAWS *float64
+ TCPACKSkippedSeq *float64
+ TCPACKSkippedFinWait2 *float64
+ TCPACKSkippedTimeWait *float64
+ TCPACKSkippedChallenge *float64
+ TCPWinProbe *float64
+ TCPKeepAlive *float64
+ TCPMTUPFail *float64
+ TCPMTUPSuccess *float64
+ TCPWqueueTooBig *float64
}
type IpExt struct { // nolint:revive
- InNoRoutes float64
- InTruncatedPkts float64
- InMcastPkts float64
- OutMcastPkts float64
- InBcastPkts float64
- OutBcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InCsumErrors float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
- ReasmOverlaps float64
+ InNoRoutes *float64
+ InTruncatedPkts *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InBcastPkts *float64
+ OutBcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InCsumErrors *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
+ ReasmOverlaps *float64
}
func (p Proc) Netstat() (ProcNetstat, error) {
@@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) {
if err != nil {
return ProcNetstat{PID: p.PID}, err
}
- procNetstat, err := parseNetstat(bytes.NewReader(data), filename)
+ procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
procNetstat.PID = p.PID
return procNetstat, err
}
-// parseNetstat parses the metrics from proc//net/netstat file
+// parseProcNetstat parses the metrics from proc//net/netstat file
// and returns a ProcNetstat structure.
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
var (
scanner = bufio.NewScanner(r)
procNetstat = ProcNetstat{}
@@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt":
switch key {
case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = value
+ procNetstat.TcpExt.SyncookiesSent = &value
case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = value
+ procNetstat.TcpExt.SyncookiesRecv = &value
case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = value
+ procNetstat.TcpExt.SyncookiesFailed = &value
case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = value
+ procNetstat.TcpExt.EmbryonicRsts = &value
case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = value
+ procNetstat.TcpExt.PruneCalled = &value
case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = value
+ procNetstat.TcpExt.RcvPruned = &value
case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = value
+ procNetstat.TcpExt.OfoPruned = &value
case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = value
+ procNetstat.TcpExt.OutOfWindowIcmps = &value
case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = value
+ procNetstat.TcpExt.LockDroppedIcmps = &value
case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = value
+ procNetstat.TcpExt.ArpFilter = &value
case "TW":
- procNetstat.TcpExt.TW = value
+ procNetstat.TcpExt.TW = &value
case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = value
+ procNetstat.TcpExt.TWRecycled = &value
case "TWKilled":
- procNetstat.TcpExt.TWKilled = value
+ procNetstat.TcpExt.TWKilled = &value
case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = value
+ procNetstat.TcpExt.PAWSActive = &value
case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = value
+ procNetstat.TcpExt.PAWSEstab = &value
case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = value
+ procNetstat.TcpExt.DelayedACKs = &value
case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = value
+ procNetstat.TcpExt.DelayedACKLocked = &value
case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = value
+ procNetstat.TcpExt.DelayedACKLost = &value
case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = value
+ procNetstat.TcpExt.ListenOverflows = &value
case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = value
+ procNetstat.TcpExt.ListenDrops = &value
case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = value
+ procNetstat.TcpExt.TCPHPHits = &value
case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = value
+ procNetstat.TcpExt.TCPPureAcks = &value
case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = value
+ procNetstat.TcpExt.TCPHPAcks = &value
case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = value
+ procNetstat.TcpExt.TCPRenoRecovery = &value
case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = value
+ procNetstat.TcpExt.TCPSackRecovery = &value
case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = value
+ procNetstat.TcpExt.TCPSACKReneging = &value
case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = value
+ procNetstat.TcpExt.TCPSACKReorder = &value
case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = value
+ procNetstat.TcpExt.TCPRenoReorder = &value
case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = value
+ procNetstat.TcpExt.TCPTSReorder = &value
case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = value
+ procNetstat.TcpExt.TCPFullUndo = &value
case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = value
+ procNetstat.TcpExt.TCPPartialUndo = &value
case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = value
+ procNetstat.TcpExt.TCPDSACKUndo = &value
case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = value
+ procNetstat.TcpExt.TCPLossUndo = &value
case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = value
+ procNetstat.TcpExt.TCPLostRetransmit = &value
case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = value
+ procNetstat.TcpExt.TCPRenoFailures = &value
case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = value
+ procNetstat.TcpExt.TCPSackFailures = &value
case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = value
+ procNetstat.TcpExt.TCPLossFailures = &value
case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = value
+ procNetstat.TcpExt.TCPFastRetrans = &value
case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = value
+ procNetstat.TcpExt.TCPSlowStartRetrans = &value
case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = value
+ procNetstat.TcpExt.TCPTimeouts = &value
case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = value
+ procNetstat.TcpExt.TCPLossProbes = &value
case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = value
+ procNetstat.TcpExt.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = value
+ procNetstat.TcpExt.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = value
+ procNetstat.TcpExt.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = value
+ procNetstat.TcpExt.TCPRcvCollapsed = &value
case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = value
+ procNetstat.TcpExt.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = value
+ procNetstat.TcpExt.TCPDSACKOfoSent = &value
case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = value
+ procNetstat.TcpExt.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = value
+ procNetstat.TcpExt.TCPDSACKOfoRecv = &value
case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = value
+ procNetstat.TcpExt.TCPAbortOnData = &value
case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = value
+ procNetstat.TcpExt.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = value
+ procNetstat.TcpExt.TCPDeferAcceptDrop = &value
case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = value
+ procNetstat.TcpExt.IPReversePathFilter = &value
case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = value
+ procNetstat.TcpExt.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = value
+ procNetstat.TcpExt.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = value
+ procNetstat.TcpExt.TCPReqQFullDrop = &value
case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = value
+ procNetstat.TcpExt.TCPRetransFail = &value
case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = value
+ procNetstat.TcpExt.TCPRcvCoalesce = &value
+ case "TCPRcvQDrop":
+ procNetstat.TcpExt.TCPRcvQDrop = &value
case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = value
+ procNetstat.TcpExt.TCPOFOQueue = &value
case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = value
+ procNetstat.TcpExt.TCPOFODrop = &value
case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = value
+ procNetstat.TcpExt.TCPOFOMerge = &value
case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = value
+ procNetstat.TcpExt.TCPChallengeACK = &value
case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = value
+ procNetstat.TcpExt.TCPSYNChallenge = &value
case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = value
+ procNetstat.TcpExt.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = value
+ procNetstat.TcpExt.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = value
+ procNetstat.TcpExt.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = value
+ procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = value
+ procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = value
+ procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = value
+ procNetstat.TcpExt.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value
+ procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = value
+ procNetstat.TcpExt.BusyPollRxPackets = &value
case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = value
+ procNetstat.TcpExt.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = value
+ procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = value
+ procNetstat.TcpExt.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = value
+ procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = value
+ procNetstat.TcpExt.TCPSynRetrans = &value
case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = value
+ procNetstat.TcpExt.TCPOrigDataSent = &value
case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = value
+ procNetstat.TcpExt.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = value
+ procNetstat.TcpExt.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = value
+ procNetstat.TcpExt.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = value
+ procNetstat.TcpExt.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = value
+ procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = value
+ procNetstat.TcpExt.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = value
+ procNetstat.TcpExt.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = value
+ procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = value
+ procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = value
+ procNetstat.TcpExt.TCPACKSkippedChallenge = &value
case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = value
+ procNetstat.TcpExt.TCPWinProbe = &value
case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = value
+ procNetstat.TcpExt.TCPKeepAlive = &value
case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = value
+ procNetstat.TcpExt.TCPMTUPFail = &value
case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = value
+ procNetstat.TcpExt.TCPMTUPSuccess = &value
case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = value
+ procNetstat.TcpExt.TCPWqueueTooBig = &value
}
case "IpExt":
switch key {
case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = value
+ procNetstat.IpExt.InNoRoutes = &value
case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = value
+ procNetstat.IpExt.InTruncatedPkts = &value
case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = value
+ procNetstat.IpExt.InMcastPkts = &value
case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = value
+ procNetstat.IpExt.OutMcastPkts = &value
case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = value
+ procNetstat.IpExt.InBcastPkts = &value
case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = value
+ procNetstat.IpExt.OutBcastPkts = &value
case "InOctets":
- procNetstat.IpExt.InOctets = value
+ procNetstat.IpExt.InOctets = &value
case "OutOctets":
- procNetstat.IpExt.OutOctets = value
+ procNetstat.IpExt.OutOctets = &value
case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = value
+ procNetstat.IpExt.InMcastOctets = &value
case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = value
+ procNetstat.IpExt.OutMcastOctets = &value
case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = value
+ procNetstat.IpExt.InBcastOctets = &value
case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = value
+ procNetstat.IpExt.OutBcastOctets = &value
case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = value
+ procNetstat.IpExt.InCsumErrors = &value
case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = value
+ procNetstat.IpExt.InNoECTPkts = &value
case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = value
+ procNetstat.IpExt.InECT1Pkts = &value
case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = value
+ procNetstat.IpExt.InECT0Pkts = &value
case "InCEPkts":
- procNetstat.IpExt.InCEPkts = value
+ procNetstat.IpExt.InCEPkts = &value
case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = value
+ procNetstat.IpExt.ReasmOverlaps = &value
}
}
}
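Because every TcpExt/IpExt counter is now a *float64, callers must nil-check before dereferencing; a counter line missing from /proc/<pid>/net/netstat on older kernels simply leaves the field nil instead of silently reporting zero. A minimal sketch:

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        p, err := procfs.Self()
        if err != nil {
            log.Fatalf("could not get process: %s", err)
        }
        ns, err := p.Netstat()
        if err != nil {
            log.Fatalf("could not read netstat: %s", err)
        }
        if ns.TcpExt.TCPTimeouts != nil {
            fmt.Printf("TCPTimeouts: %.0f\n", *ns.TcpExt.TCPTimeouts)
        } else {
            fmt.Println("TCPTimeouts not reported by this kernel")
        }
    }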
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
index ae191896cbd..6c46b718849 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -37,100 +37,100 @@ type ProcSnmp struct {
}
type Ip struct { // nolint:revive
- Forwarding float64
- DefaultTTL float64
- InReceives float64
- InHdrErrors float64
- InAddrErrors float64
- ForwDatagrams float64
- InUnknownProtos float64
- InDiscards float64
- InDelivers float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
+ Forwarding *float64
+ DefaultTTL *float64
+ InReceives *float64
+ InHdrErrors *float64
+ InAddrErrors *float64
+ ForwDatagrams *float64
+ InUnknownProtos *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
}
-type Icmp struct {
- InMsgs float64
- InErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InTimeExcds float64
- InParmProbs float64
- InSrcQuenchs float64
- InRedirects float64
- InEchos float64
- InEchoReps float64
- InTimestamps float64
- InTimestampReps float64
- InAddrMasks float64
- InAddrMaskReps float64
- OutMsgs float64
- OutErrors float64
- OutDestUnreachs float64
- OutTimeExcds float64
- OutParmProbs float64
- OutSrcQuenchs float64
- OutRedirects float64
- OutEchos float64
- OutEchoReps float64
- OutTimestamps float64
- OutTimestampReps float64
- OutAddrMasks float64
- OutAddrMaskReps float64
+type Icmp struct { // nolint:revive
+ InMsgs *float64
+ InErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InTimeExcds *float64
+ InParmProbs *float64
+ InSrcQuenchs *float64
+ InRedirects *float64
+ InEchos *float64
+ InEchoReps *float64
+ InTimestamps *float64
+ InTimestampReps *float64
+ InAddrMasks *float64
+ InAddrMaskReps *float64
+ OutMsgs *float64
+ OutErrors *float64
+ OutDestUnreachs *float64
+ OutTimeExcds *float64
+ OutParmProbs *float64
+ OutSrcQuenchs *float64
+ OutRedirects *float64
+ OutEchos *float64
+ OutEchoReps *float64
+ OutTimestamps *float64
+ OutTimestampReps *float64
+ OutAddrMasks *float64
+ OutAddrMaskReps *float64
}
type IcmpMsg struct {
- InType3 float64
- OutType3 float64
+ InType3 *float64
+ OutType3 *float64
}
type Tcp struct { // nolint:revive
- RtoAlgorithm float64
- RtoMin float64
- RtoMax float64
- MaxConn float64
- ActiveOpens float64
- PassiveOpens float64
- AttemptFails float64
- EstabResets float64
- CurrEstab float64
- InSegs float64
- OutSegs float64
- RetransSegs float64
- InErrs float64
- OutRsts float64
- InCsumErrors float64
+ RtoAlgorithm *float64
+ RtoMin *float64
+ RtoMax *float64
+ MaxConn *float64
+ ActiveOpens *float64
+ PassiveOpens *float64
+ AttemptFails *float64
+ EstabResets *float64
+ CurrEstab *float64
+ InSegs *float64
+ OutSegs *float64
+ RetransSegs *float64
+ InErrs *float64
+ OutRsts *float64
+ InCsumErrors *float64
}
type Udp struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
func (p Proc) Snmp() (ProcSnmp, error) {
@@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
switch key {
case "Forwarding":
- procSnmp.Ip.Forwarding = value
+ procSnmp.Ip.Forwarding = &value
case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = value
+ procSnmp.Ip.DefaultTTL = &value
case "InReceives":
- procSnmp.Ip.InReceives = value
+ procSnmp.Ip.InReceives = &value
case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = value
+ procSnmp.Ip.InHdrErrors = &value
case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = value
+ procSnmp.Ip.InAddrErrors = &value
case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = value
+ procSnmp.Ip.ForwDatagrams = &value
case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = value
+ procSnmp.Ip.InUnknownProtos = &value
case "InDiscards":
- procSnmp.Ip.InDiscards = value
+ procSnmp.Ip.InDiscards = &value
case "InDelivers":
- procSnmp.Ip.InDelivers = value
+ procSnmp.Ip.InDelivers = &value
case "OutRequests":
- procSnmp.Ip.OutRequests = value
+ procSnmp.Ip.OutRequests = &value
case "OutDiscards":
- procSnmp.Ip.OutDiscards = value
+ procSnmp.Ip.OutDiscards = &value
case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = value
+ procSnmp.Ip.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = value
+ procSnmp.Ip.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = value
+ procSnmp.Ip.ReasmReqds = &value
case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = value
+ procSnmp.Ip.ReasmOKs = &value
case "ReasmFails":
- procSnmp.Ip.ReasmFails = value
+ procSnmp.Ip.ReasmFails = &value
case "FragOKs":
- procSnmp.Ip.FragOKs = value
+ procSnmp.Ip.FragOKs = &value
case "FragFails":
- procSnmp.Ip.FragFails = value
+ procSnmp.Ip.FragFails = &value
case "FragCreates":
- procSnmp.Ip.FragCreates = value
+ procSnmp.Ip.FragCreates = &value
}
case "Icmp":
switch key {
case "InMsgs":
- procSnmp.Icmp.InMsgs = value
+ procSnmp.Icmp.InMsgs = &value
case "InErrors":
- procSnmp.Icmp.InErrors = value
+ procSnmp.Icmp.InErrors = &value
case "InCsumErrors":
- procSnmp.Icmp.InCsumErrors = value
+ procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = value
+ procSnmp.Icmp.InDestUnreachs = &value
case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = value
+ procSnmp.Icmp.InTimeExcds = &value
case "InParmProbs":
- procSnmp.Icmp.InParmProbs = value
+ procSnmp.Icmp.InParmProbs = &value
case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = value
+ procSnmp.Icmp.InSrcQuenchs = &value
case "InRedirects":
- procSnmp.Icmp.InRedirects = value
+ procSnmp.Icmp.InRedirects = &value
case "InEchos":
- procSnmp.Icmp.InEchos = value
+ procSnmp.Icmp.InEchos = &value
case "InEchoReps":
- procSnmp.Icmp.InEchoReps = value
+ procSnmp.Icmp.InEchoReps = &value
case "InTimestamps":
- procSnmp.Icmp.InTimestamps = value
+ procSnmp.Icmp.InTimestamps = &value
case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = value
+ procSnmp.Icmp.InTimestampReps = &value
case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = value
+ procSnmp.Icmp.InAddrMasks = &value
case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = value
+ procSnmp.Icmp.InAddrMaskReps = &value
case "OutMsgs":
- procSnmp.Icmp.OutMsgs = value
+ procSnmp.Icmp.OutMsgs = &value
case "OutErrors":
- procSnmp.Icmp.OutErrors = value
+ procSnmp.Icmp.OutErrors = &value
case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = value
+ procSnmp.Icmp.OutDestUnreachs = &value
case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = value
+ procSnmp.Icmp.OutTimeExcds = &value
case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = value
+ procSnmp.Icmp.OutParmProbs = &value
case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = value
+ procSnmp.Icmp.OutSrcQuenchs = &value
case "OutRedirects":
- procSnmp.Icmp.OutRedirects = value
+ procSnmp.Icmp.OutRedirects = &value
case "OutEchos":
- procSnmp.Icmp.OutEchos = value
+ procSnmp.Icmp.OutEchos = &value
case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = value
+ procSnmp.Icmp.OutEchoReps = &value
case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = value
+ procSnmp.Icmp.OutTimestamps = &value
case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = value
+ procSnmp.Icmp.OutTimestampReps = &value
case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = value
+ procSnmp.Icmp.OutAddrMasks = &value
case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = value
+ procSnmp.Icmp.OutAddrMaskReps = &value
}
case "IcmpMsg":
switch key {
case "InType3":
- procSnmp.IcmpMsg.InType3 = value
+ procSnmp.IcmpMsg.InType3 = &value
case "OutType3":
- procSnmp.IcmpMsg.OutType3 = value
+ procSnmp.IcmpMsg.OutType3 = &value
}
case "Tcp":
switch key {
case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = value
+ procSnmp.Tcp.RtoAlgorithm = &value
case "RtoMin":
- procSnmp.Tcp.RtoMin = value
+ procSnmp.Tcp.RtoMin = &value
case "RtoMax":
- procSnmp.Tcp.RtoMax = value
+ procSnmp.Tcp.RtoMax = &value
case "MaxConn":
- procSnmp.Tcp.MaxConn = value
+ procSnmp.Tcp.MaxConn = &value
case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = value
+ procSnmp.Tcp.ActiveOpens = &value
case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = value
+ procSnmp.Tcp.PassiveOpens = &value
case "AttemptFails":
- procSnmp.Tcp.AttemptFails = value
+ procSnmp.Tcp.AttemptFails = &value
case "EstabResets":
- procSnmp.Tcp.EstabResets = value
+ procSnmp.Tcp.EstabResets = &value
case "CurrEstab":
- procSnmp.Tcp.CurrEstab = value
+ procSnmp.Tcp.CurrEstab = &value
case "InSegs":
- procSnmp.Tcp.InSegs = value
+ procSnmp.Tcp.InSegs = &value
case "OutSegs":
- procSnmp.Tcp.OutSegs = value
+ procSnmp.Tcp.OutSegs = &value
case "RetransSegs":
- procSnmp.Tcp.RetransSegs = value
+ procSnmp.Tcp.RetransSegs = &value
case "InErrs":
- procSnmp.Tcp.InErrs = value
+ procSnmp.Tcp.InErrs = &value
case "OutRsts":
- procSnmp.Tcp.OutRsts = value
+ procSnmp.Tcp.OutRsts = &value
case "InCsumErrors":
- procSnmp.Tcp.InCsumErrors = value
+ procSnmp.Tcp.InCsumErrors = &value
}
case "Udp":
switch key {
case "InDatagrams":
- procSnmp.Udp.InDatagrams = value
+ procSnmp.Udp.InDatagrams = &value
case "NoPorts":
- procSnmp.Udp.NoPorts = value
+ procSnmp.Udp.NoPorts = &value
case "InErrors":
- procSnmp.Udp.InErrors = value
+ procSnmp.Udp.InErrors = &value
case "OutDatagrams":
- procSnmp.Udp.OutDatagrams = value
+ procSnmp.Udp.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.Udp.RcvbufErrors = value
+ procSnmp.Udp.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.Udp.SndbufErrors = value
+ procSnmp.Udp.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.Udp.InCsumErrors = value
+ procSnmp.Udp.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.Udp.IgnoredMulti = value
+ procSnmp.Udp.IgnoredMulti = &value
}
case "UdpLite":
switch key {
case "InDatagrams":
- procSnmp.UdpLite.InDatagrams = value
+ procSnmp.UdpLite.InDatagrams = &value
case "NoPorts":
- procSnmp.UdpLite.NoPorts = value
+ procSnmp.UdpLite.NoPorts = &value
case "InErrors":
- procSnmp.UdpLite.InErrors = value
+ procSnmp.UdpLite.InErrors = &value
case "OutDatagrams":
- procSnmp.UdpLite.OutDatagrams = value
+ procSnmp.UdpLite.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.UdpLite.RcvbufErrors = value
+ procSnmp.UdpLite.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.UdpLite.SndbufErrors = value
+ procSnmp.UdpLite.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.UdpLite.InCsumErrors = value
+ procSnmp.UdpLite.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.UdpLite.IgnoredMulti = value
+ procSnmp.UdpLite.IgnoredMulti = &value
}
}
}
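
With this update every counter in ProcNetstat and ProcSnmp becomes a *float64, so lines missing from /proc/<pid>/net/snmp stay nil instead of silently reading as 0, and callers now have to nil-check before dereferencing. A minimal consumer sketch, assuming this vendored version of prometheus/procfs (PID 1 is only an example target):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	p, err := fs.Proc(1) // example PID
	if err != nil {
		panic(err)
	}
	snmp, err := p.Snmp()
	if err != nil {
		panic(err)
	}
	// Tcp.RetransSegs is *float64 after this change: nil means the kernel
	// did not report the counter, which is now distinguishable from 0.
	if v := snmp.Tcp.RetransSegs; v != nil {
		fmt.Printf("TcpRetransSegs: %.0f\n", *v)
	} else {
		fmt.Println("TcpRetransSegs not reported")
	}
}
```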
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
index f611992d52c..3059cc6a136 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -36,106 +36,106 @@ type ProcSnmp6 struct {
}
type Ip6 struct { // nolint:revive
- InReceives float64
- InHdrErrors float64
- InTooBigErrors float64
- InNoRoutes float64
- InAddrErrors float64
- InUnknownProtos float64
- InTruncatedPkts float64
- InDiscards float64
- InDelivers float64
- OutForwDatagrams float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
- InMcastPkts float64
- OutMcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
+ InReceives *float64
+ InHdrErrors *float64
+ InTooBigErrors *float64
+ InNoRoutes *float64
+ InAddrErrors *float64
+ InUnknownProtos *float64
+ InTruncatedPkts *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutForwDatagrams *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
}
type Icmp6 struct {
- InMsgs float64
- InErrors float64
- OutMsgs float64
- OutErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InPktTooBigs float64
- InTimeExcds float64
- InParmProblems float64
- InEchos float64
- InEchoReplies float64
- InGroupMembQueries float64
- InGroupMembResponses float64
- InGroupMembReductions float64
- InRouterSolicits float64
- InRouterAdvertisements float64
- InNeighborSolicits float64
- InNeighborAdvertisements float64
- InRedirects float64
- InMLDv2Reports float64
- OutDestUnreachs float64
- OutPktTooBigs float64
- OutTimeExcds float64
- OutParmProblems float64
- OutEchos float64
- OutEchoReplies float64
- OutGroupMembQueries float64
- OutGroupMembResponses float64
- OutGroupMembReductions float64
- OutRouterSolicits float64
- OutRouterAdvertisements float64
- OutNeighborSolicits float64
- OutNeighborAdvertisements float64
- OutRedirects float64
- OutMLDv2Reports float64
- InType1 float64
- InType134 float64
- InType135 float64
- InType136 float64
- InType143 float64
- OutType133 float64
- OutType135 float64
- OutType136 float64
- OutType143 float64
+ InMsgs *float64
+ InErrors *float64
+ OutMsgs *float64
+ OutErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InPktTooBigs *float64
+ InTimeExcds *float64
+ InParmProblems *float64
+ InEchos *float64
+ InEchoReplies *float64
+ InGroupMembQueries *float64
+ InGroupMembResponses *float64
+ InGroupMembReductions *float64
+ InRouterSolicits *float64
+ InRouterAdvertisements *float64
+ InNeighborSolicits *float64
+ InNeighborAdvertisements *float64
+ InRedirects *float64
+ InMLDv2Reports *float64
+ OutDestUnreachs *float64
+ OutPktTooBigs *float64
+ OutTimeExcds *float64
+ OutParmProblems *float64
+ OutEchos *float64
+ OutEchoReplies *float64
+ OutGroupMembQueries *float64
+ OutGroupMembResponses *float64
+ OutGroupMembReductions *float64
+ OutRouterSolicits *float64
+ OutRouterAdvertisements *float64
+ OutNeighborSolicits *float64
+ OutNeighborAdvertisements *float64
+ OutRedirects *float64
+ OutMLDv2Reports *float64
+ InType1 *float64
+ InType134 *float64
+ InType135 *float64
+ InType136 *float64
+ InType143 *float64
+ OutType133 *float64
+ OutType135 *float64
+ OutType136 *float64
+ OutType143 *float64
}
type Udp6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
}
func (p Proc) Snmp6() (ProcSnmp6, error) {
@@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
- procSnmp6.Ip6.InReceives = value
+ procSnmp6.Ip6.InReceives = &value
case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = value
+ procSnmp6.Ip6.InHdrErrors = &value
case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = value
+ procSnmp6.Ip6.InTooBigErrors = &value
case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = value
+ procSnmp6.Ip6.InNoRoutes = &value
case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = value
+ procSnmp6.Ip6.InAddrErrors = &value
case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = value
+ procSnmp6.Ip6.InUnknownProtos = &value
case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = value
+ procSnmp6.Ip6.InTruncatedPkts = &value
case "InDiscards":
- procSnmp6.Ip6.InDiscards = value
+ procSnmp6.Ip6.InDiscards = &value
case "InDelivers":
- procSnmp6.Ip6.InDelivers = value
+ procSnmp6.Ip6.InDelivers = &value
case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = value
+ procSnmp6.Ip6.OutForwDatagrams = &value
case "OutRequests":
- procSnmp6.Ip6.OutRequests = value
+ procSnmp6.Ip6.OutRequests = &value
case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = value
+ procSnmp6.Ip6.OutDiscards = &value
case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = value
+ procSnmp6.Ip6.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = value
+ procSnmp6.Ip6.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = value
+ procSnmp6.Ip6.ReasmReqds = &value
case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = value
+ procSnmp6.Ip6.ReasmOKs = &value
case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = value
+ procSnmp6.Ip6.ReasmFails = &value
case "FragOKs":
- procSnmp6.Ip6.FragOKs = value
+ procSnmp6.Ip6.FragOKs = &value
case "FragFails":
- procSnmp6.Ip6.FragFails = value
+ procSnmp6.Ip6.FragFails = &value
case "FragCreates":
- procSnmp6.Ip6.FragCreates = value
+ procSnmp6.Ip6.FragCreates = &value
case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = value
+ procSnmp6.Ip6.InMcastPkts = &value
case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = value
+ procSnmp6.Ip6.OutMcastPkts = &value
case "InOctets":
- procSnmp6.Ip6.InOctets = value
+ procSnmp6.Ip6.InOctets = &value
case "OutOctets":
- procSnmp6.Ip6.OutOctets = value
+ procSnmp6.Ip6.OutOctets = &value
case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = value
+ procSnmp6.Ip6.InMcastOctets = &value
case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = value
+ procSnmp6.Ip6.OutMcastOctets = &value
case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = value
+ procSnmp6.Ip6.InBcastOctets = &value
case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = value
+ procSnmp6.Ip6.OutBcastOctets = &value
case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = value
+ procSnmp6.Ip6.InNoECTPkts = &value
case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = value
+ procSnmp6.Ip6.InECT1Pkts = &value
case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = value
+ procSnmp6.Ip6.InECT0Pkts = &value
case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = value
+ procSnmp6.Ip6.InCEPkts = &value
}
case "Icmp6":
switch key {
case "InMsgs":
- procSnmp6.Icmp6.InMsgs = value
+ procSnmp6.Icmp6.InMsgs = &value
case "InErrors":
- procSnmp6.Icmp6.InErrors = value
+ procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = value
+ procSnmp6.Icmp6.OutMsgs = &value
case "OutErrors":
- procSnmp6.Icmp6.OutErrors = value
+ procSnmp6.Icmp6.OutErrors = &value
case "InCsumErrors":
- procSnmp6.Icmp6.InCsumErrors = value
+ procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = value
+ procSnmp6.Icmp6.InDestUnreachs = &value
case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = value
+ procSnmp6.Icmp6.InPktTooBigs = &value
case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = value
+ procSnmp6.Icmp6.InTimeExcds = &value
case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = value
+ procSnmp6.Icmp6.InParmProblems = &value
case "InEchos":
- procSnmp6.Icmp6.InEchos = value
+ procSnmp6.Icmp6.InEchos = &value
case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = value
+ procSnmp6.Icmp6.InEchoReplies = &value
case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = value
+ procSnmp6.Icmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = value
+ procSnmp6.Icmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = value
+ procSnmp6.Icmp6.InGroupMembReductions = &value
case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = value
+ procSnmp6.Icmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = value
+ procSnmp6.Icmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = value
+ procSnmp6.Icmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = value
+ procSnmp6.Icmp6.InNeighborAdvertisements = &value
case "InRedirects":
- procSnmp6.Icmp6.InRedirects = value
+ procSnmp6.Icmp6.InRedirects = &value
case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = value
+ procSnmp6.Icmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = value
+ procSnmp6.Icmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = value
+ procSnmp6.Icmp6.OutPktTooBigs = &value
case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = value
+ procSnmp6.Icmp6.OutTimeExcds = &value
case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = value
+ procSnmp6.Icmp6.OutParmProblems = &value
case "OutEchos":
- procSnmp6.Icmp6.OutEchos = value
+ procSnmp6.Icmp6.OutEchos = &value
case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = value
+ procSnmp6.Icmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = value
+ procSnmp6.Icmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = value
+ procSnmp6.Icmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = value
+ procSnmp6.Icmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = value
+ procSnmp6.Icmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = value
+ procSnmp6.Icmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = value
+ procSnmp6.Icmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = value
+ procSnmp6.Icmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = value
+ procSnmp6.Icmp6.OutRedirects = &value
case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = value
+ procSnmp6.Icmp6.OutMLDv2Reports = &value
case "InType1":
- procSnmp6.Icmp6.InType1 = value
+ procSnmp6.Icmp6.InType1 = &value
case "InType134":
- procSnmp6.Icmp6.InType134 = value
+ procSnmp6.Icmp6.InType134 = &value
case "InType135":
- procSnmp6.Icmp6.InType135 = value
+ procSnmp6.Icmp6.InType135 = &value
case "InType136":
- procSnmp6.Icmp6.InType136 = value
+ procSnmp6.Icmp6.InType136 = &value
case "InType143":
- procSnmp6.Icmp6.InType143 = value
+ procSnmp6.Icmp6.InType143 = &value
case "OutType133":
- procSnmp6.Icmp6.OutType133 = value
+ procSnmp6.Icmp6.OutType133 = &value
case "OutType135":
- procSnmp6.Icmp6.OutType135 = value
+ procSnmp6.Icmp6.OutType135 = &value
case "OutType136":
- procSnmp6.Icmp6.OutType136 = value
+ procSnmp6.Icmp6.OutType136 = &value
case "OutType143":
- procSnmp6.Icmp6.OutType143 = value
+ procSnmp6.Icmp6.OutType143 = &value
}
case "Udp6":
switch key {
case "InDatagrams":
- procSnmp6.Udp6.InDatagrams = value
+ procSnmp6.Udp6.InDatagrams = &value
case "NoPorts":
- procSnmp6.Udp6.NoPorts = value
+ procSnmp6.Udp6.NoPorts = &value
case "InErrors":
- procSnmp6.Udp6.InErrors = value
+ procSnmp6.Udp6.InErrors = &value
case "OutDatagrams":
- procSnmp6.Udp6.OutDatagrams = value
+ procSnmp6.Udp6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.Udp6.RcvbufErrors = value
+ procSnmp6.Udp6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.Udp6.SndbufErrors = value
+ procSnmp6.Udp6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.Udp6.InCsumErrors = value
+ procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = value
+ procSnmp6.Udp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
case "InDatagrams":
- procSnmp6.UdpLite6.InDatagrams = value
+ procSnmp6.UdpLite6.InDatagrams = &value
case "NoPorts":
- procSnmp6.UdpLite6.NoPorts = value
+ procSnmp6.UdpLite6.NoPorts = &value
case "InErrors":
- procSnmp6.UdpLite6.InErrors = value
+ procSnmp6.UdpLite6.InErrors = &value
case "OutDatagrams":
- procSnmp6.UdpLite6.OutDatagrams = value
+ procSnmp6.UdpLite6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.UdpLite6.RcvbufErrors = value
+ procSnmp6.UdpLite6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.UdpLite6.SndbufErrors = value
+ procSnmp6.UdpLite6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.UdpLite6.InCsumErrors = value
+ procSnmp6.UdpLite6.InCsumErrors = &value
}
}
}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 06c556ef962..b278eb2c2df 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -102,6 +102,8 @@ type ProcStat struct {
RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
+ // CPU number last executed on.
+ Processor uint
// Real-time scheduling priority, a number in the range 1 to 99 for processes
// scheduled under a real-time policy, or 0, for non-real-time processes.
RTPriority uint
@@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) {
&ignoreUint64,
&ignoreUint64,
&ignoreInt64,
- &ignoreInt64,
+ &s.Processor,
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index 594022ded48..3d8c06439a9 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) {
kv := strings.SplitN(line, ":", 2)
// removes spaces
- k := string(strings.TrimSpace(kv[0]))
- v := string(strings.TrimSpace(kv[1]))
+ k := strings.TrimSpace(kv[0])
+ v := strings.TrimSpace(kv[1])
// removes "kB"
- v = string(bytes.Trim([]byte(v), " kB"))
+ v = strings.TrimSuffix(v, " kB")
// value to int when possible
// we can skip error check here, 'cause vKBytes is not used when value is a string
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 33f97caa08d..586af48af9f 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -62,7 +62,7 @@ type Stat struct {
// Summed up cpu statistics.
CPUTotal CPUStat
// Per-CPU statistics.
- CPU []CPUStat
+ CPU map[int64]CPUStat
// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
IRQTotal uint64
// Number of times a numbered IRQ was triggered.
@@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) {
if err != nil {
return Stat{}, err
}
+ procStat, err := parseStat(bytes.NewReader(data), fileName)
+ if err != nil {
+ return Stat{}, err
+ }
+ return procStat, nil
+}
- stat := Stat{}
+// parseStat parses the metrics from /proc/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ stat = Stat{
+ CPU: make(map[int64]CPUStat),
+ }
+ err error
+ )
- scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) {
if cpuID == -1 {
stat.CPUTotal = cpuStat
} else {
- for int64(len(stat.CPU)) <= cpuID {
- stat.CPU = append(stat.CPU, CPUStat{})
- }
stat.CPU[cpuID] = cpuStat
}
}
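
Stat.CPU changes from a slice to a map keyed by CPU id, so sparse or offline CPUs no longer force zero-filled placeholder entries, and iteration order is no longer implicit. A consumer sketch under this vendored version:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	st, err := fs.Stat()
	if err != nil {
		panic(err)
	}
	// Sort the keys if stable output ordering matters; the map itself
	// only contains CPUs that actually appeared in /proc/stat.
	ids := make([]int64, 0, len(st.CPU))
	for id := range st.CPU {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	for _, id := range ids {
		fmt.Printf("cpu%d user=%.2f\n", id, st.CPU[id].User)
	}
}
```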
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 00000000000..f08bfc769db
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ d, err := os.Open(taskPath)
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
+ }
+
+ t := Procs{}
+ for _, n := range names {
+ tid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
+ }
+
+ return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ if _, err := os.Stat(taskPath); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+ tfs := fsi.FS(proc.path("task"))
+ if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: tfs}, nil
+}
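
The new thread.go exposes per-thread (TID) views of /proc/PID/task that reuse the existing Proc parsers. A hedged usage sketch that also exercises the new ProcStat.Processor field added earlier in this diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	threads, err := procfs.AllThreads(1) // threads of PID 1, as an example
	if err != nil {
		panic(err)
	}
	for _, t := range threads {
		stat, err := t.Stat()
		if err != nil {
			continue // thread may have exited between listing and reading
		}
		fmt.Printf("tid=%d comm=%s last_cpu=%d\n", t.PID, stat.Comm, stat.Processor)
	}
}
```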
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
index 20ceb77e2df..cdedcae996d 100644
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -26,7 +26,9 @@ import (
)
// The VM interface is described at
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
// and numa_zonelist_order (deprecated) which is a string.
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go
index 3ae8ac7f3e8..4d108361da9 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go
@@ -74,7 +74,7 @@ var (
// Alias ARN: awskms:///arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
// Alias ARN with endpoint: awskms://localhost:4566/arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
uuidRE = `m?r?k?-?[A-Fa-f0-9]{8}-?[A-Fa-f0-9]{4}-?[A-Fa-f0-9]{4}-?[A-Fa-f0-9]{4}-?[A-Fa-f0-9]{12}`
- arnRE = `arn:(?:aws|aws-us-gov):kms:[a-z0-9-]+:\d{12}:`
+ arnRE = `arn:(?:aws|aws-us-gov|aws-cn):kms:[a-z0-9-]+:\d{12}:`
hostRE = `([^/]*)/`
keyIDRE = regexp.MustCompile(`^awskms://` + hostRE + `(` + uuidRE + `)$`)
keyARNRE = regexp.MustCompile(`^awskms://` + hostRE + `(` + arnRE + `key/` + uuidRE + `)$`)
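
The arnRE change adds the aws-cn partition to the accepted key references. A quick standalone check of the updated pattern (regex literals copied from the hunk above; the ARN itself is a made-up example):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	uuidRE := `m?r?k?-?[A-Fa-f0-9]{8}-?[A-Fa-f0-9]{4}-?[A-Fa-f0-9]{4}-?[A-Fa-f0-9]{4}-?[A-Fa-f0-9]{12}`
	arnRE := `arn:(?:aws|aws-us-gov|aws-cn):kms:[a-z0-9-]+:\d{12}:`
	hostRE := `([^/]*)/`
	keyARNRE := regexp.MustCompile(`^awskms://` + hostRE + `(` + arnRE + `key/` + uuidRE + `)$`)

	ref := "awskms:///arn:aws-cn:kms:cn-north-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
	fmt.Println(keyARNRE.MatchString(ref)) // true with the aws-cn alternative; false before this change
}
```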
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
new file mode 100644
index 00000000000..06282ce79c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -0,0 +1,245 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+ "go.opentelemetry.io/otel/propagation"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ // ScopeName is the instrumentation scope name.
+ ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ // GRPCStatusCodeKey is convention for numeric status code of a gRPC request.
+ GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+// Filter is a predicate used to determine whether a given request in
+// interceptor info should be traced. A Filter must return true if
+// the request should be traced.
+type Filter func(*InterceptorInfo) bool
+
+// config is a group of options for this instrumentation.
+type config struct {
+ Filter Filter
+ Propagators propagation.TextMapPropagator
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+ SpanStartOptions []trace.SpanStartOption
+
+ ReceivedEvent bool
+ SentEvent bool
+
+ tracer trace.Tracer
+ meter metric.Meter
+
+ rpcDuration metric.Float64Histogram
+ rpcRequestSize metric.Int64Histogram
+ rpcResponseSize metric.Int64Histogram
+ rpcRequestsPerRPC metric.Int64Histogram
+ rpcResponsesPerRPC metric.Int64Histogram
+}
+
+// Option applies an option value for a config.
+type Option interface {
+ apply(*config)
+}
+
+// newConfig returns a config configured with all the passed Options.
+func newConfig(opts []Option, role string) *config {
+ c := &config{
+ Propagators: otel.GetTextMapPropagator(),
+ TracerProvider: otel.GetTracerProvider(),
+ MeterProvider: otel.GetMeterProvider(),
+ }
+ for _, o := range opts {
+ o.apply(c)
+ }
+
+ c.tracer = c.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(SemVersion()),
+ )
+
+ c.meter = c.MeterProvider.Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ var err error
+ c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration",
+ metric.WithDescription("Measures the duration of inbound RPC."),
+ metric.WithUnit("ms"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcDuration == nil {
+ c.rpcDuration = noop.Float64Histogram{}
+ }
+ }
+
+ c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size",
+ metric.WithDescription("Measures size of RPC request messages (uncompressed)."),
+ metric.WithUnit("By"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcRequestSize == nil {
+ c.rpcRequestSize = noop.Int64Histogram{}
+ }
+ }
+
+ c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size",
+ metric.WithDescription("Measures size of RPC response messages (uncompressed)."),
+ metric.WithUnit("By"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcResponseSize == nil {
+ c.rpcResponseSize = noop.Int64Histogram{}
+ }
+ }
+
+ c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
+ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
+ metric.WithUnit("{count}"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcRequestsPerRPC == nil {
+ c.rpcRequestsPerRPC = noop.Int64Histogram{}
+ }
+ }
+
+ c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
+ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
+ metric.WithUnit("{count}"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcResponsesPerRPC == nil {
+ c.rpcResponsesPerRPC = noop.Int64Histogram{}
+ }
+ }
+
+ return c
+}
+
+type propagatorsOption struct{ p propagation.TextMapPropagator }
+
+func (o propagatorsOption) apply(c *config) {
+ if o.p != nil {
+ c.Propagators = o.p
+ }
+}
+
+// WithPropagators returns an Option to use the Propagators when extracting
+// and injecting trace context from requests.
+func WithPropagators(p propagation.TextMapPropagator) Option {
+ return propagatorsOption{p: p}
+}
+
+type tracerProviderOption struct{ tp trace.TracerProvider }
+
+func (o tracerProviderOption) apply(c *config) {
+ if o.tp != nil {
+ c.TracerProvider = o.tp
+ }
+}
+
+// WithInterceptorFilter returns an Option to use the request filter.
+//
+// Deprecated: Use stats handlers instead.
+func WithInterceptorFilter(f Filter) Option {
+ return interceptorFilterOption{f: f}
+}
+
+type interceptorFilterOption struct {
+ f Filter
+}
+
+func (o interceptorFilterOption) apply(c *config) {
+ if o.f != nil {
+ c.Filter = o.f
+ }
+}
+
+// WithTracerProvider returns an Option to use the TracerProvider when
+// creating a Tracer.
+func WithTracerProvider(tp trace.TracerProvider) Option {
+ return tracerProviderOption{tp: tp}
+}
+
+type meterProviderOption struct{ mp metric.MeterProvider }
+
+func (o meterProviderOption) apply(c *config) {
+ if o.mp != nil {
+ c.MeterProvider = o.mp
+ }
+}
+
+// WithMeterProvider returns an Option to use the MeterProvider when
+// creating a Meter. If this option is not provided, the global MeterProvider will be used.
+func WithMeterProvider(mp metric.MeterProvider) Option {
+ return meterProviderOption{mp: mp}
+}
+
+// Event type that can be recorded, see WithMessageEvents.
+type Event int
+
+// Different types of events that can be recorded, see WithMessageEvents.
+const (
+ ReceivedEvents Event = iota
+ SentEvents
+)
+
+type messageEventsProviderOption struct {
+ events []Event
+}
+
+func (m messageEventsProviderOption) apply(c *config) {
+ for _, e := range m.events {
+ switch e {
+ case ReceivedEvents:
+ c.ReceivedEvent = true
+ case SentEvents:
+ c.SentEvent = true
+ }
+ }
+}
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+// - ReceivedEvents: Record the number of bytes read after every gRPC read operation.
+// - SentEvents: Record the number of bytes written after every gRPC write operation.
+func WithMessageEvents(events ...Event) Option {
+ return messageEventsProviderOption{events: events}
+}
+
+type spanStartOption struct{ opts []trace.SpanStartOption }
+
+func (o spanStartOption) apply(c *config) {
+ c.SpanStartOptions = append(c.SpanStartOptions, o.opts...)
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+ return spanStartOption{opts}
+}
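
These Options are consumed both by the interceptors and by the newer stats handlers. A hedged client-side wiring sketch — NewClientHandler is the constructor the deprecation notices in interceptor.go below point at, and the dial target is a placeholder:

```go
package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler(
			otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents),
		)),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```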
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
new file mode 100644
index 00000000000..958dcd87a4c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
@@ -0,0 +1,22 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otelgrpc is the instrumentation library for [google.golang.org/grpc].
+
+Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client.
+
+Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server.
+*/
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
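
And the server-side counterpart suggested by the package doc, as a sketch (grpc.StatsHandler is the standard grpc-go ServerOption; service registration and listening are omitted):

```go
package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

func main() {
	srv := grpc.NewServer(
		grpc.StatsHandler(otelgrpc.NewServerHandler()),
	)
	_ = srv // register services and call srv.Serve(lis) as usual
}
```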
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
new file mode 100644
index 00000000000..3b487a93623
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
@@ -0,0 +1,540 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+// gRPC tracing middleware
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md
+import (
+ "context"
+ "io"
+ "net"
+ "strconv"
+ "time"
+
+ "google.golang.org/grpc"
+ grpc_codes "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type messageType attribute.KeyValue
+
+// Event adds an event of the messageType to the span associated with the
+// passed context with a message id.
+func (m messageType) Event(ctx context.Context, id int, _ interface{}) {
+ span := trace.SpanFromContext(ctx)
+ if !span.IsRecording() {
+ return
+ }
+ span.AddEvent("message", trace.WithAttributes(
+ attribute.KeyValue(m),
+ RPCMessageIDKey.Int(id),
+ ))
+}
+
+var (
+ messageSent = messageType(RPCMessageTypeSent)
+ messageReceived = messageType(RPCMessageTypeReceived)
+)
+
+// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
+// for use in a grpc.Dial call.
+//
+// Deprecated: Use [NewClientHandler] instead.
+func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
+ cfg := newConfig(opts, "client")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ ctx context.Context,
+ method string,
+ req, reply interface{},
+ cc *grpc.ClientConn,
+ invoker grpc.UnaryInvoker,
+ callOpts ...grpc.CallOption,
+ ) error {
+ i := &InterceptorInfo{
+ Method: method,
+ Type: UnaryClient,
+ }
+ if cfg.Filter != nil && !cfg.Filter(i) {
+ return invoker(ctx, method, req, reply, cc, callOpts...)
+ }
+
+ name, attr, _ := telemetryAttributes(method, cc.Target())
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ ctx,
+ name,
+ startOpts...,
+ )
+ defer span.End()
+
+ ctx = inject(ctx, cfg.Propagators)
+
+ if cfg.SentEvent {
+ messageSent.Event(ctx, 1, req)
+ }
+
+ err := invoker(ctx, method, req, reply, cc, callOpts...)
+
+ if cfg.ReceivedEvent {
+ messageReceived.Event(ctx, 1, reply)
+ }
+
+ if err != nil {
+ s, _ := status.FromError(err)
+ span.SetStatus(codes.Error, s.Message())
+ span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ }
+
+ return err
+ }
+}
+
+// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and
+// SendMsg method call.
+type clientStream struct {
+ grpc.ClientStream
+ desc *grpc.StreamDesc
+
+ span trace.Span
+
+ receivedEvent bool
+ sentEvent bool
+
+ receivedMessageID int
+ sentMessageID int
+}
+
+var _ = proto.Marshal
+
+func (w *clientStream) RecvMsg(m interface{}) error {
+ err := w.ClientStream.RecvMsg(m)
+
+ if err == nil && !w.desc.ServerStreams {
+ w.endSpan(nil)
+ } else if err == io.EOF {
+ w.endSpan(nil)
+ } else if err != nil {
+ w.endSpan(err)
+ } else {
+ w.receivedMessageID++
+
+ if w.receivedEvent {
+ messageReceived.Event(w.Context(), w.receivedMessageID, m)
+ }
+ }
+
+ return err
+}
+
+func (w *clientStream) SendMsg(m interface{}) error {
+ err := w.ClientStream.SendMsg(m)
+
+ w.sentMessageID++
+
+ if w.sentEvent {
+ messageSent.Event(w.Context(), w.sentMessageID, m)
+ }
+
+ if err != nil {
+ w.endSpan(err)
+ }
+
+ return err
+}
+
+func (w *clientStream) Header() (metadata.MD, error) {
+ md, err := w.ClientStream.Header()
+ if err != nil {
+ w.endSpan(err)
+ }
+
+ return md, err
+}
+
+func (w *clientStream) CloseSend() error {
+ err := w.ClientStream.CloseSend()
+ if err != nil {
+ w.endSpan(err)
+ }
+
+ return err
+}
+
+func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream {
+ return &clientStream{
+ ClientStream: s,
+ span: span,
+ desc: desc,
+ receivedEvent: cfg.ReceivedEvent,
+ sentEvent: cfg.SentEvent,
+ }
+}
+
+func (w *clientStream) endSpan(err error) {
+ if err != nil {
+ s, _ := status.FromError(err)
+ w.span.SetStatus(codes.Error, s.Message())
+ w.span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ w.span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ }
+
+ w.span.End()
+}
+
+// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
+// for use in a grpc.Dial call.
+//
+// Deprecated: Use [NewClientHandler] instead.
+func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
+ cfg := newConfig(opts, "client")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ ctx context.Context,
+ desc *grpc.StreamDesc,
+ cc *grpc.ClientConn,
+ method string,
+ streamer grpc.Streamer,
+ callOpts ...grpc.CallOption,
+ ) (grpc.ClientStream, error) {
+ i := &InterceptorInfo{
+ Method: method,
+ Type: StreamClient,
+ }
+ if cfg.Filter != nil && !cfg.Filter(i) {
+ return streamer(ctx, desc, cc, method, callOpts...)
+ }
+
+ name, attr, _ := telemetryAttributes(method, cc.Target())
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ ctx,
+ name,
+ startOpts...,
+ )
+
+ ctx = inject(ctx, cfg.Propagators)
+
+ s, err := streamer(ctx, desc, cc, method, callOpts...)
+ if err != nil {
+ grpcStatus, _ := status.FromError(err)
+ span.SetStatus(codes.Error, grpcStatus.Message())
+ span.SetAttributes(statusCodeAttr(grpcStatus.Code()))
+ span.End()
+ return s, err
+ }
+ stream := wrapClientStream(ctx, s, desc, span, cfg)
+ return stream, nil
+ }
+}
+
+// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable
+// for use in a grpc.NewServer call.
+//
+// Deprecated: Use [NewServerHandler] instead.
+func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
+ cfg := newConfig(opts, "server")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ ctx context.Context,
+ req interface{},
+ info *grpc.UnaryServerInfo,
+ handler grpc.UnaryHandler,
+ ) (interface{}, error) {
+ i := &InterceptorInfo{
+ UnaryServerInfo: info,
+ Type: UnaryServer,
+ }
+ if cfg.Filter != nil && !cfg.Filter(i) {
+ return handler(ctx, req)
+ }
+
+ ctx = extract(ctx, cfg.Propagators)
+ name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
+ name,
+ startOpts...,
+ )
+ defer span.End()
+
+ if cfg.ReceivedEvent {
+ messageReceived.Event(ctx, 1, req)
+ }
+
+ before := time.Now()
+
+ resp, err := handler(ctx, req)
+
+ s, _ := status.FromError(err)
+ if err != nil {
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ if cfg.SentEvent {
+ messageSent.Event(ctx, 1, s.Proto())
+ }
+ } else {
+ if cfg.SentEvent {
+ messageSent.Event(ctx, 1, resp)
+ }
+ }
+ grpcStatusCodeAttr := statusCodeAttr(s.Code())
+ span.SetAttributes(grpcStatusCodeAttr)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(before)) / float64(time.Millisecond)
+
+ metricAttrs = append(metricAttrs, grpcStatusCodeAttr)
+ cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...))
+
+ return resp, err
+ }
+}
+
+// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and
+// SendMsg method calls.
+type serverStream struct {
+ grpc.ServerStream
+ ctx context.Context
+
+ receivedMessageID int
+ sentMessageID int
+
+ receivedEvent bool
+ sentEvent bool
+}
+
+func (w *serverStream) Context() context.Context {
+ return w.ctx
+}
+
+func (w *serverStream) RecvMsg(m interface{}) error {
+ err := w.ServerStream.RecvMsg(m)
+
+ if err == nil {
+ w.receivedMessageID++
+ if w.receivedEvent {
+ messageReceived.Event(w.Context(), w.receivedMessageID, m)
+ }
+ }
+
+ return err
+}
+
+func (w *serverStream) SendMsg(m interface{}) error {
+ err := w.ServerStream.SendMsg(m)
+
+ w.sentMessageID++
+ if w.sentEvent {
+ messageSent.Event(w.Context(), w.sentMessageID, m)
+ }
+
+ return err
+}
+
+func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream {
+ return &serverStream{
+ ServerStream: ss,
+ ctx: ctx,
+ receivedEvent: cfg.ReceivedEvent,
+ sentEvent: cfg.SentEvent,
+ }
+}
+
+// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
+// for use in a grpc.NewServer call.
+//
+// Deprecated: Use [NewServerHandler] instead.
+func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
+ cfg := newConfig(opts, "server")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ srv interface{},
+ ss grpc.ServerStream,
+ info *grpc.StreamServerInfo,
+ handler grpc.StreamHandler,
+ ) error {
+ ctx := ss.Context()
+ i := &InterceptorInfo{
+ StreamServerInfo: info,
+ Type: StreamServer,
+ }
+ if cfg.Filter != nil && !cfg.Filter(i) {
+ return handler(srv, wrapServerStream(ctx, ss, cfg))
+ }
+
+ ctx = extract(ctx, cfg.Propagators)
+ name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
+ name,
+ startOpts...,
+ )
+ defer span.End()
+
+ err := handler(srv, wrapServerStream(ctx, ss, cfg))
+ if err != nil {
+ s, _ := status.FromError(err)
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ }
+
+ return err
+ }
+}
+
+// telemetryAttributes returns a span name and span and metric attributes from
+// the gRPC method and peer address.
+func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) {
+ name, methodAttrs := internal.ParseFullMethod(fullMethod)
+ peerAttrs := peerAttr(peerAddress)
+
+ attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs))
+ attrs = append(attrs, RPCSystemGRPC)
+ attrs = append(attrs, methodAttrs...)
+ metricAttrs := attrs[:1+len(methodAttrs)]
+ attrs = append(attrs, peerAttrs...)
+ return name, attrs, metricAttrs
+}
+
+// peerAttr returns attributes about the peer address.
+func peerAttr(addr string) []attribute.KeyValue {
+ host, p, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil
+ }
+
+ if host == "" {
+ host = "127.0.0.1"
+ }
+ port, err := strconv.Atoi(p)
+ if err != nil {
+ return nil
+ }
+
+ var attr []attribute.KeyValue
+ if ip := net.ParseIP(host); ip != nil {
+ attr = []attribute.KeyValue{
+ semconv.NetSockPeerAddr(host),
+ semconv.NetSockPeerPort(port),
+ }
+ } else {
+ attr = []attribute.KeyValue{
+ semconv.NetPeerName(host),
+ semconv.NetPeerPort(port),
+ }
+ }
+
+ return attr
+}
+
+// peerFromCtx returns a peer address from a context, if one exists.
+func peerFromCtx(ctx context.Context) string {
+ p, ok := peer.FromContext(ctx)
+ if !ok {
+ return ""
+ }
+ return p.Addr.String()
+}
+
+// statusCodeAttr returns status code attribute based on given gRPC code.
+func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue {
+ return GRPCStatusCodeKey.Int64(int64(c))
+}
+
+// serverStatus returns a span status code and message for a given gRPC
+// status code. It maps specific gRPC status codes to a corresponding span
+// status code and message. This function is intended for use on the server
+// side of a gRPC connection.
+//
+// If the gRPC status code is Unknown, DeadlineExceeded, Unimplemented,
+// Internal, Unavailable, or DataLoss, it returns a span status code of Error
+// and the message from the gRPC status. Otherwise, it returns a span status
+// code of Unset and an empty message.
+func serverStatus(grpcStatus *status.Status) (codes.Code, string) {
+ switch grpcStatus.Code() {
+ case grpc_codes.Unknown,
+ grpc_codes.DeadlineExceeded,
+ grpc_codes.Unimplemented,
+ grpc_codes.Internal,
+ grpc_codes.Unavailable,
+ grpc_codes.DataLoss:
+ return codes.Error, grpcStatus.Message()
+ default:
+ return codes.Unset, ""
+ }
+}
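For reference, the sketch below shows where the interceptors defined above are conventionally registered. It is illustrative only (the dial target and credentials are placeholders, not taken from this change), and the deprecation notices in this file point to the stats handlers, NewClientHandler and NewServerHandler, as the preferred replacement.

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

func main() {
	// Client side: trace outgoing unary and streaming calls via the deprecated interceptors.
	conn, err := grpc.Dial(
		"localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
		grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Server side: trace incoming unary and streaming calls via the deprecated interceptors.
	srv := grpc.NewServer(
		grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor()),
		grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor()),
	)
	_ = srv
}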
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
new file mode 100644
index 00000000000..f6116946bfd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "google.golang.org/grpc"
+)
+
+// InterceptorType is the flag to define which gRPC interceptor
+// the InterceptorInfo object is.
+type InterceptorType uint8
+
+const (
+ // UndefinedInterceptor is the type for the interceptor information that is not
+ // well initialized or categorized to other types.
+ UndefinedInterceptor InterceptorType = iota
+ // UnaryClient is the type for grpc.UnaryClient interceptor.
+ UnaryClient
+ // StreamClient is the type for grpc.StreamClient interceptor.
+ StreamClient
+ // UnaryServer is the type for grpc.UnaryServer interceptor.
+ UnaryServer
+ // StreamServer is the type for grpc.StreamServer interceptor.
+ StreamServer
+)
+
+// InterceptorInfo is the union of some arguments to four types of
+// gRPC interceptors.
+type InterceptorInfo struct {
+ // Method is method name registered to UnaryClient and StreamClient
+ Method string
+ // UnaryServerInfo is the metadata for UnaryServer
+ UnaryServerInfo *grpc.UnaryServerInfo
+ // StreamServerInfo is the metadata for StreamServer
+ StreamServerInfo *grpc.StreamServerInfo
+ // Type is the type for interceptor
+ Type InterceptorType
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
new file mode 100644
index 00000000000..cf32a9e978c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
@@ -0,0 +1,51 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+
+import (
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// ParseFullMethod returns a span name following the OpenTelemetry semantic
+// conventions as well as all applicable span attribute.KeyValue attributes based
+// on a gRPC's FullMethod.
+//
+// Parsing is consistent with grpc-go implementation:
+// https://github.com/grpc/grpc-go/blob/v1.57.0/internal/grpcutil/method.go#L26-L39
+func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) {
+ if !strings.HasPrefix(fullMethod, "/") {
+ // Invalid format, does not follow `/package.service/method`.
+ return fullMethod, nil
+ }
+ name := fullMethod[1:]
+ pos := strings.LastIndex(name, "/")
+ if pos < 0 {
+ // Invalid format, does not follow `/package.service/method`.
+ return name, nil
+ }
+ service, method := name[:pos], name[pos+1:]
+
+ var attrs []attribute.KeyValue
+ if service != "" {
+ attrs = append(attrs, semconv.RPCService(service))
+ }
+ if method != "" {
+ attrs = append(attrs, semconv.RPCMethod(method))
+ }
+ return name, attrs
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
new file mode 100644
index 00000000000..f585fb6ae0c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+
+ "go.opentelemetry.io/otel/baggage"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type metadataSupplier struct {
+ metadata *metadata.MD
+}
+
+// assert that metadataSupplier implements the TextMapCarrier interface.
+var _ propagation.TextMapCarrier = &metadataSupplier{}
+
+func (s *metadataSupplier) Get(key string) string {
+ values := s.metadata.Get(key)
+ if len(values) == 0 {
+ return ""
+ }
+ return values[0]
+}
+
+func (s *metadataSupplier) Set(key string, value string) {
+ s.metadata.Set(key, value)
+}
+
+func (s *metadataSupplier) Keys() []string {
+ out := make([]string, 0, len(*s.metadata))
+ for key := range *s.metadata {
+ out = append(out, key)
+ }
+ return out
+}
+
+// Inject injects correlation context and span context into the gRPC
+// metadata object. This function is meant to be used on outgoing
+// requests.
+// Deprecated: Unnecessary public func.
+func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
+ c := newConfig(opts, "")
+ c.Propagators.Inject(ctx, &metadataSupplier{
+ metadata: md,
+ })
+}
+
+func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+ propagators.Inject(ctx, &metadataSupplier{
+ metadata: &md,
+ })
+ return metadata.NewOutgoingContext(ctx, md)
+}
+
+// Extract returns the correlation context and span context that
+// another service encoded in the gRPC metadata object with Inject.
+// This function is meant to be used on incoming requests.
+// Deprecated: Unnecessary public func.
+func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
+ c := newConfig(opts, "")
+ ctx = c.Propagators.Extract(ctx, &metadataSupplier{
+ metadata: md,
+ })
+
+ return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx)
+}
+
+func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+
+ return propagators.Extract(ctx, &metadataSupplier{
+ metadata: &md,
+ })
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
new file mode 100644
index 00000000000..b65fab308f3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
@@ -0,0 +1,52 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// Semantic conventions for attribute keys for gRPC.
+const (
+ // Name of message transmitted or received.
+ RPCNameKey = attribute.Key("name")
+
+ // Type of message transmitted or received.
+ RPCMessageTypeKey = attribute.Key("message.type")
+
+ // Identifier of message transmitted or received.
+ RPCMessageIDKey = attribute.Key("message.id")
+
+ // The compressed size of the message transmitted or received in bytes.
+ RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // The uncompressed size of the message transmitted or received in
+ // bytes.
+ RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+// Semantic conventions for common RPC attributes.
+var (
+ // Semantic convention for gRPC as the remoting system.
+ RPCSystemGRPC = semconv.RPCSystemGRPC
+
+ // Semantic convention for a message named message.
+ RPCNameMessage = RPCNameKey.String("message")
+
+ // Semantic conventions for RPC message types.
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
new file mode 100644
index 00000000000..73d2b8b6b27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
@@ -0,0 +1,218 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ grpc_codes "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type gRPCContextKey struct{}
+
+type gRPCContext struct {
+ messagesReceived int64
+ messagesSent int64
+ metricAttrs []attribute.KeyValue
+}
+
+type serverHandler struct {
+ *config
+}
+
+// NewServerHandler creates a stats.Handler for a gRPC server.
+func NewServerHandler(opts ...Option) stats.Handler {
+ h := &serverHandler{
+ config: newConfig(opts, "server"),
+ }
+
+ return h
+}
+
+// TagConn can attach some information to the given context.
+func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn processes the Conn stats.
+func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
+}
+
+// TagRPC can attach some information to the given context.
+func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ ctx = extract(ctx, h.config.Propagators)
+
+ name, attrs := internal.ParseFullMethod(info.FullMethodName)
+ attrs = append(attrs, RPCSystemGRPC)
+ ctx, _ = h.tracer.Start(
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
+ name,
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(attrs...),
+ )
+
+ gctx := gRPCContext{
+ metricAttrs: attrs,
+ }
+ return context.WithValue(ctx, gRPCContextKey{}, &gctx)
+}
+
+// HandleRPC processes the RPC stats.
+func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ isServer := true
+ h.handleRPC(ctx, rs, isServer)
+}
+
+type clientHandler struct {
+ *config
+}
+
+// NewClientHandler creates a stats.Handler for a gRPC client.
+func NewClientHandler(opts ...Option) stats.Handler {
+ h := &clientHandler{
+ config: newConfig(opts, "client"),
+ }
+
+ return h
+}
+
+// TagRPC can attach some information to the given context.
+func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ name, attrs := internal.ParseFullMethod(info.FullMethodName)
+ attrs = append(attrs, RPCSystemGRPC)
+ ctx, _ = h.tracer.Start(
+ ctx,
+ name,
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attrs...),
+ )
+
+ gctx := gRPCContext{
+ metricAttrs: attrs,
+ }
+
+ return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators)
+}
+
+// HandleRPC processes the RPC stats.
+func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ isServer := false
+ h.handleRPC(ctx, rs, isServer)
+}
+
+// TagConn can attach some information to the given context.
+func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn processes the Conn stats.
+func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
+ // no-op
+}
+
+func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool) { // nolint: revive // isServer is not a control flag.
+ span := trace.SpanFromContext(ctx)
+ var metricAttrs []attribute.KeyValue
+ var messageId int64
+
+ gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
+ if gctx != nil {
+ metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1)
+ metricAttrs = append(metricAttrs, gctx.metricAttrs...)
+ }
+
+ switch rs := rs.(type) {
+ case *stats.Begin:
+ case *stats.InPayload:
+ if gctx != nil {
+ messageId = atomic.AddInt64(&gctx.messagesReceived, 1)
+ c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
+ }
+
+ if c.ReceivedEvent {
+ span.AddEvent("message",
+ trace.WithAttributes(
+ semconv.MessageTypeReceived,
+ semconv.MessageIDKey.Int64(messageId),
+ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
+ semconv.MessageUncompressedSizeKey.Int(rs.Length),
+ ),
+ )
+ }
+ case *stats.OutPayload:
+ if gctx != nil {
+ messageId = atomic.AddInt64(&gctx.messagesSent, 1)
+ c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
+ }
+
+ if c.SentEvent {
+ span.AddEvent("message",
+ trace.WithAttributes(
+ semconv.MessageTypeSent,
+ semconv.MessageIDKey.Int64(messageId),
+ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
+ semconv.MessageUncompressedSizeKey.Int(rs.Length),
+ ),
+ )
+ }
+ case *stats.OutTrailer:
+ case *stats.OutHeader:
+ if p, ok := peer.FromContext(ctx); ok {
+ span.SetAttributes(peerAttr(p.Addr.String())...)
+ }
+ case *stats.End:
+ var rpcStatusAttr attribute.KeyValue
+
+ if rs.Error != nil {
+ s, _ := status.FromError(rs.Error)
+ if isServer {
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ } else {
+ span.SetStatus(codes.Error, s.Message())
+ }
+ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code()))
+ } else {
+ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK))
+ }
+ span.SetAttributes(rpcStatusAttr)
+ span.End()
+
+ metricAttrs = append(metricAttrs, rpcStatusAttr)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond)
+
+ c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...))
+ if gctx != nil {
+ c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...))
+ c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...))
+ }
+ default:
+ return
+ }
+}
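The stats handlers above are what the deprecation notices on the interceptors point to. A minimal wiring sketch, with a placeholder target address and insecure credentials assumed purely for illustration:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

func main() {
	// Server: a single handler observes every incoming RPC and starts server spans in TagRPC.
	srv := grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))
	_ = srv

	// Client: the handler starts client spans, injects trace context, and records per-RPC metrics.
	conn, err := grpc.Dial(
		"localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}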
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
new file mode 100644
index 00000000000..001b2fb413e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+// Version is the current release version of the gRPC instrumentation.
+func Version() string {
+ return "0.48.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
new file mode 100644
index 00000000000..92b8cf73c97
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
+// Please be careful of initialization order - for example, if you change
+// the global propagator, the DefaultClient might still be using the old one.
+var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
+
+// Get is a convenient replacement for http.Get that adds a span around the request.
+func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ return DefaultClient.Do(req)
+}
+
+// Head is a convenient replacement for http.Head that adds a span around the request.
+func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ return DefaultClient.Do(req)
+}
+
+// Post is a convenient replacement for http.Post that adds a span around the request.
+func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
+ req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", contentType)
+ return DefaultClient.Do(req)
+}
+
+// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
+func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
+ return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
new file mode 100644
index 00000000000..c6f438774f7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "net/http"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Attribute keys that can be added to a span.
+const (
+ ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read
+ ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded)
+ WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written
+ WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
+)
+
+// Server HTTP metrics.
+const (
+ // Deprecated: This field is unused.
+ RequestCount = "http.server.request_count" // Incoming request count total
+ // Deprecated: Use of this field has been migrated to serverRequestSize. It will be removed in a future version.
+ RequestContentLength = "http.server.request_content_length" // Incoming request bytes total
+ // Deprecated: Use of this field has been migrated to serverResponseSize. It will be removed in a future version.
+ ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total
+ // Deprecated: Use of this field has been migrated to serverDuration. It will be removed in a future version.
+ ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds
+ serverRequestSize = "http.server.request.size" // Incoming request bytes total
+ serverResponseSize = "http.server.response.size" // Incoming response bytes total
+ serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
+)
+
+// Client HTTP metrics.
+const (
+ clientRequestSize = "http.client.request.size" // Outgoing request bytes total
+ clientResponseSize = "http.client.response.size" // Outgoing response bytes total
+ clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds
+)
+
+// Filter is a predicate used to determine whether a given http.request should
+// be traced. A Filter must return true if the request should be traced.
+type Filter func(*http.Request) bool
+
+func newTracer(tp trace.TracerProvider) trace.Tracer {
+ return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version()))
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
new file mode 100644
index 00000000000..a1b5b5e5aa8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -0,0 +1,207 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptrace"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// ScopeName is the instrumentation scope name.
+const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// config represents the configuration options available for the http.Handler
+// and http.Transport types.
+type config struct {
+ ServerName string
+ Tracer trace.Tracer
+ Meter metric.Meter
+ Propagators propagation.TextMapPropagator
+ SpanStartOptions []trace.SpanStartOption
+ PublicEndpoint bool
+ PublicEndpointFn func(*http.Request) bool
+ ReadEvent bool
+ WriteEvent bool
+ Filters []Filter
+ SpanNameFormatter func(string, *http.Request) string
+ ClientTrace func(context.Context) *httptrace.ClientTrace
+
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+}
+
+// Option interface used for setting optional config properties.
+type Option interface {
+ apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+ o(c)
+}
+
+// newConfig creates a new config struct and applies opts to it.
+func newConfig(opts ...Option) *config {
+ c := &config{
+ Propagators: otel.GetTextMapPropagator(),
+ MeterProvider: otel.GetMeterProvider(),
+ }
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+
+ // Tracer is only initialized if manually specified. Otherwise, it can be passed with the tracing context.
+ if c.TracerProvider != nil {
+ c.Tracer = newTracer(c.TracerProvider)
+ }
+
+ c.Meter = c.MeterProvider.Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(Version()),
+ )
+
+ return c
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.TracerProvider = provider
+ }
+ })
+}
+
+// WithMeterProvider specifies a meter provider to use for creating a meter.
+// If none is specified, the global provider is used.
+func WithMeterProvider(provider metric.MeterProvider) Option {
+ return optionFunc(func(cfg *config) {
+ if provider != nil {
+ cfg.MeterProvider = provider
+ }
+ })
+}
+
+// WithPublicEndpoint configures the Handler to link the span with an incoming
+// span context. If this option is not provided, then the association is a child
+// association instead of a link.
+func WithPublicEndpoint() Option {
+ return optionFunc(func(c *config) {
+ c.PublicEndpoint = true
+ })
+}
+
+// WithPublicEndpointFn runs with every request, and allows conditionally
+// configuring the Handler to link the span with an incoming span context. If
+// this option is not provided or returns false, then the association is a
+// child association instead of a link.
+// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
+func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
+ return optionFunc(func(c *config) {
+ c.PublicEndpointFn = fn
+ })
+}
+
+// WithPropagators configures specific propagators. If this
+// option isn't specified, then the global TextMapPropagator is used.
+func WithPropagators(ps propagation.TextMapPropagator) Option {
+ return optionFunc(func(c *config) {
+ if ps != nil {
+ c.Propagators = ps
+ }
+ })
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+ return optionFunc(func(c *config) {
+ c.SpanStartOptions = append(c.SpanStartOptions, opts...)
+ })
+}
+
+// WithFilter adds a filter to the list of filters used by the handler.
+// If any filter indicates to exclude a request then the request will not be
+// traced. All filters must allow a request to be traced for a Span to be created.
+// If no filters are provided then all requests are traced.
+// Filters will be invoked for each processed request, it is advised to make them
+// simple and fast.
+func WithFilter(f Filter) Option {
+ return optionFunc(func(c *config) {
+ c.Filters = append(c.Filters, f)
+ })
+}
+
+type event int
+
+// Different types of events that can be recorded, see WithMessageEvents.
+const (
+ ReadEvents event = iota
+ WriteEvents
+)
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
+// using the ReadBytesKey
+// - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
+// using the WriteBytesKey
+func WithMessageEvents(events ...event) Option {
+ return optionFunc(func(c *config) {
+ for _, e := range events {
+ switch e {
+ case ReadEvents:
+ c.ReadEvent = true
+ case WriteEvents:
+ c.WriteEvent = true
+ }
+ }
+ })
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request and the returned string will become the Span Name.
+func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
+ return optionFunc(func(c *config) {
+ c.SpanNameFormatter = f
+ })
+}
+
+// WithClientTrace takes a function that returns client trace instance that will be
+// applied to the requests sent through the otelhttp Transport.
+func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
+ return optionFunc(func(c *config) {
+ c.ClientTrace = f
+ })
+}
+
+// WithServerName returns an Option that sets the name of the (virtual) server
+// handling requests.
+func WithServerName(server string) Option {
+ return optionFunc(func(c *config) {
+ c.ServerName = server
+ })
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
new file mode 100644
index 00000000000..38c7f01c71a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package otelhttp provides an http.Handler and functions that are intended
+// to be used to add tracing by wrapping existing handlers (with Handler) and
+// routes (with WithRouteTag).
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
new file mode 100644
index 00000000000..3d292dab6d3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -0,0 +1,283 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/felixge/httpsnoop"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// middleware is an http middleware which wraps the next handler in a span.
+type middleware struct {
+ operation string
+ server string
+
+ tracer trace.Tracer
+ meter metric.Meter
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ readEvent bool
+ writeEvent bool
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ publicEndpoint bool
+ publicEndpointFn func(*http.Request) bool
+
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ serverLatencyMeasure metric.Float64Histogram
+}
+
+func defaultHandlerFormatter(operation string, _ *http.Request) string {
+ return operation
+}
+
+// NewHandler wraps the passed handler in a span named after the operation and
+// enriches it with metrics.
+func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
+ return NewMiddleware(operation, opts...)(handler)
+}
+
+// NewMiddleware returns a tracing and metrics instrumentation middleware.
+// The handler returned by the middleware wraps a handler
+// in a span named after the operation and enriches it with metrics.
+func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
+ h := middleware{
+ operation: operation,
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
+ WithSpanNameFormatter(defaultHandlerFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ h.configure(c)
+ h.createMeasures()
+
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h.serveHTTP(w, r, next)
+ })
+ }
+}
+
+func (h *middleware) configure(c *config) {
+ h.tracer = c.Tracer
+ h.meter = c.Meter
+ h.propagators = c.Propagators
+ h.spanStartOptions = c.SpanStartOptions
+ h.readEvent = c.ReadEvent
+ h.writeEvent = c.WriteEvent
+ h.filters = c.Filters
+ h.spanNameFormatter = c.SpanNameFormatter
+ h.publicEndpoint = c.PublicEndpoint
+ h.publicEndpointFn = c.PublicEndpointFn
+ h.server = c.ServerName
+}
+
+func handleErr(err error) {
+ if err != nil {
+ otel.Handle(err)
+ }
+}
+
+func (h *middleware) createMeasures() {
+ var err error
+ h.requestBytesCounter, err = h.meter.Int64Counter(
+ serverRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ h.responseBytesCounter, err = h.meter.Int64Counter(
+ serverResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ h.serverLatencyMeasure, err = h.meter.Float64Histogram(
+ serverDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of inbound HTTP requests."),
+ )
+ handleErr(err)
+}
+
+// serveHTTP sets up tracing and calls the given next http.Handler with the span
+// context injected into the request context.
+func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
+ requestStartTime := time.Now()
+ for _, f := range h.filters {
+ if !f(r) {
+ // Simply pass through to the handler if a filter rejects the request
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
+ opts := []trace.SpanStartOption{
+ trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...),
+ }
+ if h.server != "" {
+ hostAttr := semconv.NetHostName(h.server)
+ opts = append(opts, trace.WithAttributes(hostAttr))
+ }
+ opts = append(opts, h.spanStartOptions...)
+ if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
+ opts = append(opts, trace.WithNewRoot())
+ // Linking incoming span context if any for public endpoint.
+ if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
+ opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
+ }
+ }
+
+ tracer := h.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
+ defer span.End()
+
+ readRecordFunc := func(int64) {}
+ if h.readEvent {
+ readRecordFunc = func(n int64) {
+ span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n)))
+ }
+ }
+
+ var bw bodyWrapper
+ // If the request body is nil or http.NoBody, leave it untouched: wrapping it
+ // would change the body's identity in ways callers cannot foresee, since code
+ // may assert that the ReadCloser is exactly nil or NoBody.
+ if r.Body != nil && r.Body != http.NoBody {
+ bw.ReadCloser = r.Body
+ bw.record = readRecordFunc
+ r.Body = &bw
+ }
+
+ writeRecordFunc := func(int64) {}
+ if h.writeEvent {
+ writeRecordFunc = func(n int64) {
+ span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
+ }
+ }
+
+ rww := &respWriterWrapper{
+ ResponseWriter: w,
+ record: writeRecordFunc,
+ ctx: ctx,
+ props: h.propagators,
+ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
+ }
+
+ // Wrap w to use our ResponseWriter methods while also exposing
+ // other interfaces that w may implement (http.CloseNotifier,
+ // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
+
+ w = httpsnoop.Wrap(w, httpsnoop.Hooks{
+ Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
+ return rww.Header
+ },
+ Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+ return rww.Write
+ },
+ WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+ return rww.WriteHeader
+ },
+ })
+
+ labeler := &Labeler{}
+ ctx = injectLabeler(ctx, labeler)
+
+ next.ServeHTTP(w, r.WithContext(ctx))
+
+ setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err)
+
+ // Add metrics
+ attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
+ if rww.statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
+ }
+ o := metric.WithAttributes(attributes...)
+ h.requestBytesCounter.Add(ctx, bw.read, o)
+ h.responseBytesCounter.Add(ctx, rww.written, o)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
+}
+
+func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) {
+ attributes := []attribute.KeyValue{}
+
+ // TODO: Consider adding an event after each read and write, possibly as an
+ // option (defaulting to off), so as to not create needlessly verbose spans.
+ if read > 0 {
+ attributes = append(attributes, ReadBytesKey.Int64(read))
+ }
+ if rerr != nil && rerr != io.EOF {
+ attributes = append(attributes, ReadErrorKey.String(rerr.Error()))
+ }
+ if wrote > 0 {
+ attributes = append(attributes, WroteBytesKey.Int64(wrote))
+ }
+ if statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+ }
+ span.SetStatus(semconvutil.HTTPServerStatus(statusCode))
+
+ if werr != nil && werr != io.EOF {
+ attributes = append(attributes, WriteErrorKey.String(werr.Error()))
+ }
+ span.SetAttributes(attributes...)
+}
+
+// WithRouteTag annotates spans and metrics with the provided route name
+// using the HTTP route ("http.route") attribute.
+func WithRouteTag(route string, h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ attr := semconv.HTTPRouteKey.String(route)
+
+ span := trace.SpanFromContext(r.Context())
+ span.SetAttributes(attr)
+
+ labeler, _ := LabelerFromContext(r.Context())
+ labeler.Add(attr)
+
+ h.ServeHTTP(w, r)
+ })
+}
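+
+// Illustrative usage sketch: WithRouteTag is typically wrapped around each
+// route's handler so the span and metrics recorded by the middleware carry
+// the low-cardinality route pattern rather than the raw URL. The mux, route,
+// and usersHandler names below are hypothetical.
+//
+//	mux := http.NewServeMux()
+//	mux.Handle("/users/", otelhttp.WithRouteTag("/users/:id", usersHandler))
+//	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "server"))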
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
new file mode 100644
index 00000000000..edf4ce3d315
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+// Generate semconvutil package:
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
new file mode 100644
index 00000000000..495d700cfa8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
@@ -0,0 +1,587 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/httpconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// HTTPClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status_code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes HTTPClientRequest was used to create the span with a complete set
+// of attributes. A complete set of attributes can be generated by combining
+// these with the attributes for the request contained in resp. For example:
+//
+//	append(HTTPClientResponse(resp), HTTPClientRequest(resp.Request)...)
+func HTTPClientResponse(resp *http.Response) []attribute.KeyValue {
+ return hc.ClientResponse(resp)
+}
+
+// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related
+// values are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func HTTPClientRequest(req *http.Request) []attribute.KeyValue {
+ return hc.ClientRequest(req)
+}
+
+// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the
+// related values are defined in req: "net.peer.port".
+func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+ return hc.ClientRequestMetrics(req)
+}
+
+// HTTPClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func HTTPClientStatus(code int) (codes.Code, string) {
+ return hc.ClientStatus(code)
+}
+
+// HTTPServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if they related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequest(server, req)
+}
+
+// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "net.host.name". The following attributes are
+// returned if they related values are defined in req: "net.host.port".
+func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequestMetrics(server, req)
+}
+
+// HTTPServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func HTTPServerStatus(code int) (codes.Code, string) {
+ return hc.ServerStatus(code)
+}
+
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type httpConv struct {
+ NetConv *netConv
+
+ HTTPClientIPKey attribute.Key
+ HTTPMethodKey attribute.Key
+ HTTPRequestContentLengthKey attribute.Key
+ HTTPResponseContentLengthKey attribute.Key
+ HTTPRouteKey attribute.Key
+ HTTPSchemeHTTP attribute.KeyValue
+ HTTPSchemeHTTPS attribute.KeyValue
+ HTTPStatusCodeKey attribute.Key
+ HTTPTargetKey attribute.Key
+ HTTPURLKey attribute.Key
+ UserAgentOriginalKey attribute.Key
+}
+
+var hc = &httpConv{
+ NetConv: nc,
+
+ HTTPClientIPKey: semconv.HTTPClientIPKey,
+ HTTPMethodKey: semconv.HTTPMethodKey,
+ HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
+ HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+ HTTPRouteKey: semconv.HTTPRouteKey,
+ HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
+ HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
+ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
+ HTTPTargetKey: semconv.HTTPTargetKey,
+ HTTPURLKey: semconv.HTTPURLKey,
+ UserAgentOriginalKey: semconv.UserAgentOriginalKey,
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated by combining these
+// with the attributes for the request contained in resp. For example:
+//
+// append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.status_code int
+ http.response_content_length int
+ */
+ var n int
+ if resp.StatusCode > 0 {
+ n++
+ }
+ if resp.ContentLength > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+ }
+ if resp.ContentLength > 0 {
+ attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+ }
+ return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "user_agent.original".
+func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ user_agent.original string
+ http.url string
+ net.peer.name string
+ net.peer.port int
+ http.request_content_length int
+ */
+
+ /* The following semantic conventions are not returned:
+ http.status_code This requires the response. See ClientResponse.
+ http.response_content_length This requires the response. See ClientResponse.
+ net.sock.family This requires the socket used.
+ net.sock.peer.addr This requires the socket used.
+ net.sock.peer.name This requires the socket used.
+ net.sock.peer.port This requires the socket used.
+ http.resend_count This is something outside of a single request.
+	   net.protocol.name The value in the Request is ignored, and the go client will always use "http".
+ net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0.
+ */
+	n := 3 // URL, peer name, and method.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ if req.ContentLength > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, c.HTTPURLKey.String(u))
+
+ attrs = append(attrs, c.NetConv.PeerName(peer))
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+ }
+
+ if l := req.ContentLength; l > 0 {
+ attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+ }
+
+ return attrs
+}
+
+// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the related values
+// are defined in req: "net.peer.port".
+func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ net.peer.name string
+ net.peer.port int
+ */
+
+ n := 2 // method, peer name.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
+
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if they
+// related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip",
+// "net.protocol.name", "net.protocol.version".
+func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ http.scheme string
+ net.host.name string
+ net.host.port int
+ net.sock.peer.addr string
+ net.sock.peer.port int
+ user_agent.original string
+ http.client_ip string
+ net.protocol.name string Note: not set if the value is "http".
+ net.protocol.version string
+ http.target string Note: doesn't include the query parameter.
+ */
+
+ /* The following semantic conventions are not returned:
+ http.status_code This requires the response.
+ http.request_content_length This requires the len() of body, which can mutate it.
+ http.response_content_length This requires the response.
+ http.route This is not available.
+ net.sock.peer.name This would require a DNS lookup.
+ net.sock.host.addr The request doesn't have access to the underlying socket.
+ net.sock.host.port The request doesn't have access to the underlying socket.
+
+ */
+ n := 4 // Method, scheme, proto, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ peer, peerPort := splitHostPort(req.RemoteAddr)
+ if peer != "" {
+ n++
+ if peerPort > 0 {
+ n++
+ }
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+
+ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP != "" {
+ n++
+ }
+
+ var target string
+ if req.URL != nil {
+ target = req.URL.Path
+ if target != "" {
+ n++
+ }
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+
+ if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", so this will not be
+		// a file path that would be interpreted with a sock family.
+ attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+ }
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+ }
+
+ if target != "" {
+ attrs = append(attrs, c.HTTPTargetKey.String(target))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+ }
+
+ return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "net.host.name". The following attributes are
+// returned if they related values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.scheme string
+ http.route string
+ http.method string
+ http.status_code int
+ net.host.name string
+ net.host.port int
+ net.protocol.name string Note: not set if the value is "http".
+ net.protocol.version string
+ */
+
+ n := 3 // Method, scheme, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.methodMetric(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+ if protoName != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+ }
+
+ return attrs
+}
+
+func (c *httpConv) method(method string) attribute.KeyValue {
+ if method == "" {
+ return c.HTTPMethodKey.String(http.MethodGet)
+ }
+ return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) methodMetric(method string) attribute.KeyValue {
+ method = strings.ToUpper(method)
+ switch method {
+ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+ default:
+ method = "_OTHER"
+ }
+ return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return c.HTTPSchemeHTTPS
+ }
+ return c.HTTPSchemeHTTP
+}
+
+func serverClientIP(xForwardedFor string) string {
+ if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
+ xForwardedFor = xForwardedFor[:idx]
+ }
+ return xForwardedFor
+}
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+ if https {
+ if port > 0 && port != 443 {
+ return port
+ }
+ } else {
+ if port > 0 && port != 80 {
+ return port
+ }
+ }
+ return -1
+}
+
+// Return the request host and port from the first non-empty source.
+func firstHostPort(source ...string) (host string, port int) {
+ for _, hostport := range source {
+ host, port = splitHostPort(hostport)
+ if host != "" || port > 0 {
+ break
+ }
+ }
+ return
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 400 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
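+
+// Illustrative note: the asymmetry between ClientStatus and ServerStatus
+// follows from the code above, e.g.:
+//
+//	hc.ClientStatus(404) // codes.Error, ""  (4xx is an error for clients)
+//	hc.ServerStatus(404) // codes.Unset, ""  (4xx is not an error for servers)
+//	hc.ServerStatus(503) // codes.Error, ""
+//	hc.ServerStatus(42)  // codes.Error, "Invalid HTTP status code 42"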
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
new file mode 100644
index 00000000000..d3a06e0cada
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -0,0 +1,215 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/netconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "net"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// NetTransport returns a trace attribute describing the transport protocol of the
+// passed network. See the net.Dial for information about acceptable network
+// values.
+func NetTransport(network string) attribute.KeyValue {
+ return nc.Transport(network)
+}
+
+// netConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type netConv struct {
+ NetHostNameKey attribute.Key
+ NetHostPortKey attribute.Key
+ NetPeerNameKey attribute.Key
+ NetPeerPortKey attribute.Key
+ NetProtocolName attribute.Key
+ NetProtocolVersion attribute.Key
+ NetSockFamilyKey attribute.Key
+ NetSockPeerAddrKey attribute.Key
+ NetSockPeerPortKey attribute.Key
+ NetSockHostAddrKey attribute.Key
+ NetSockHostPortKey attribute.Key
+ NetTransportOther attribute.KeyValue
+ NetTransportTCP attribute.KeyValue
+ NetTransportUDP attribute.KeyValue
+ NetTransportInProc attribute.KeyValue
+}
+
+var nc = &netConv{
+ NetHostNameKey: semconv.NetHostNameKey,
+ NetHostPortKey: semconv.NetHostPortKey,
+ NetPeerNameKey: semconv.NetPeerNameKey,
+ NetPeerPortKey: semconv.NetPeerPortKey,
+ NetProtocolName: semconv.NetProtocolNameKey,
+ NetProtocolVersion: semconv.NetProtocolVersionKey,
+ NetSockFamilyKey: semconv.NetSockFamilyKey,
+ NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+ NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+ NetSockHostAddrKey: semconv.NetSockHostAddrKey,
+ NetSockHostPortKey: semconv.NetSockHostPortKey,
+ NetTransportOther: semconv.NetTransportOther,
+ NetTransportTCP: semconv.NetTransportTCP,
+ NetTransportUDP: semconv.NetTransportUDP,
+ NetTransportInProc: semconv.NetTransportInProc,
+}
+
+func (c *netConv) Transport(network string) attribute.KeyValue {
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ return c.NetTransportTCP
+ case "udp", "udp4", "udp6":
+ return c.NetTransportUDP
+ case "unix", "unixgram", "unixpacket":
+ return c.NetTransportInProc
+ default:
+ // "ip:*", "ip4:*", and "ip6:*" all are considered other.
+ return c.NetTransportOther
+ }
+}
+
+// Host returns attributes for a network host address.
+func (c *netConv) Host(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.HostName(h))
+ if p > 0 {
+ attrs = append(attrs, c.HostPort(int(p)))
+ }
+ return attrs
+}
+
+func (c *netConv) HostName(name string) attribute.KeyValue {
+ return c.NetHostNameKey.String(name)
+}
+
+func (c *netConv) HostPort(port int) attribute.KeyValue {
+ return c.NetHostPortKey.Int(port)
+}
+
+func family(network, address string) string {
+ switch network {
+ case "unix", "unixgram", "unixpacket":
+ return "unix"
+ default:
+ if ip := net.ParseIP(address); ip != nil {
+ if ip.To4() == nil {
+ return "inet6"
+ }
+ return "inet"
+ }
+ }
+ return ""
+}
+
+// Peer returns attributes for a network peer address.
+func (c *netConv) Peer(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.PeerName(h))
+ if p > 0 {
+ attrs = append(attrs, c.PeerPort(int(p)))
+ }
+ return attrs
+}
+
+func (c *netConv) PeerName(name string) attribute.KeyValue {
+ return c.NetPeerNameKey.String(name)
+}
+
+func (c *netConv) PeerPort(port int) attribute.KeyValue {
+ return c.NetPeerPortKey.Int(port)
+}
+
+func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
+ return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
+ return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p)
+}
+
+func netProtocol(proto string) (name string, version string) {
+ name, version, _ = strings.Cut(proto, "/")
+ name = strings.ToLower(name)
+ return name, version
+}
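+
+// Illustrative note: expected splitHostPort results, following the
+// implementation above:
+//
+//	splitHostPort("example.com:8080") // "example.com", 8080
+//	splitHostPort("[::1]:443")        // "::1", 443
+//	splitHostPort("example.com")      // "example.com", -1 (no port)
+//	splitHostPort(":8080")            // "", 8080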
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 00000000000..26a51a18050
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+ mu sync.Mutex
+ attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.attributes = append(l.attributes, ls...)
+}
+
+// Get returns a copy of the attributes added to the Labeler.
+func (l *Labeler) Get() []attribute.KeyValue {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ ret := make([]attribute.KeyValue, len(l.attributes))
+ copy(ret, l.attributes)
+ return ret
+}
+
+type labelerContextKeyType int
+
+const labelerContextKey labelerContextKeyType = 0
+
+func injectLabeler(ctx context.Context, l *Labeler) context.Context {
+	return context.WithValue(ctx, labelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+	l, ok := ctx.Value(labelerContextKey).(*Labeler)
+ if !ok {
+ l = &Labeler{}
+ }
+ return l, ok
+}
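+
+// Illustrative usage sketch: inside a handler wrapped by this package, custom
+// metric attributes can be attached through the Labeler stored in the request
+// context. The attribute key and header below are hypothetical.
+//
+//	func handle(w http.ResponseWriter, r *http.Request) {
+//		labeler, _ := otelhttp.LabelerFromContext(r.Context())
+//		labeler.Add(attribute.String("tenant", r.Header.Get("X-Tenant")))
+//		w.WriteHeader(http.StatusNoContent)
+//	}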
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 00000000000..8d850df3baa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,286 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/http/httptrace"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel/metric"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// Transport implements the http.RoundTripper interface and wraps
+// outbound HTTP(S) requests with a span and enriches them with metrics.
+type Transport struct {
+ rt http.RoundTripper
+
+ tracer trace.Tracer
+ meter metric.Meter
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ clientTrace func(context.Context) *httptrace.ClientTrace
+
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ latencyMeasure metric.Float64Histogram
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewTransport wraps the provided http.RoundTripper with one that
+// starts a span, injects the span context into the outbound request headers,
+// and enriches it with metrics.
+//
+// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
+// as the base http.RoundTripper.
+func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
+ if base == nil {
+ base = http.DefaultTransport
+ }
+
+ t := Transport{
+ rt: base,
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+ WithSpanNameFormatter(defaultTransportFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ t.applyConfig(c)
+ t.createMeasures()
+
+ return &t
+}
+
+func (t *Transport) applyConfig(c *config) {
+ t.tracer = c.Tracer
+ t.meter = c.Meter
+ t.propagators = c.Propagators
+ t.spanStartOptions = c.SpanStartOptions
+ t.filters = c.Filters
+ t.spanNameFormatter = c.SpanNameFormatter
+ t.clientTrace = c.ClientTrace
+}
+
+func (t *Transport) createMeasures() {
+ var err error
+ t.requestBytesCounter, err = t.meter.Int64Counter(
+ clientRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ t.responseBytesCounter, err = t.meter.Int64Counter(
+ clientResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ t.latencyMeasure, err = t.meter.Float64Histogram(
+ clientDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of outbound HTTP requests."),
+ )
+ handleErr(err)
+}
+
+func defaultTransportFormatter(_ string, r *http.Request) string {
+ return "HTTP " + r.Method
+}
+
+// RoundTrip creates a Span and propagates its context via the provided request's headers
+// before handing the request to the configured base RoundTripper. The created span will
+// end when the response body is closed or when a read from the body returns io.EOF.
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+ requestStartTime := time.Now()
+ for _, f := range t.filters {
+ if !f(r) {
+ // Simply pass through to the base RoundTripper if a filter rejects the request
+ return t.rt.RoundTrip(r)
+ }
+ }
+
+ tracer := t.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+ ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+ if t.clientTrace != nil {
+ ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+ }
+
+ labeler := &Labeler{}
+ ctx = injectLabeler(ctx, labeler)
+
+	r = r.Clone(ctx) // According to the RoundTripper spec, we shouldn't modify the original request.
+
+ // use a body wrapper to determine the request size
+ var bw bodyWrapper
+	// If the request body is nil or http.NoBody, we don't want to wrap it:
+	// callers may compare the body against those sentinel values, and
+	// replacing it would change that identity in an unforeseeable way.
+ if r.Body != nil && r.Body != http.NoBody {
+ bw.ReadCloser = r.Body
+		// No-op to prevent a nil panic; this record func is not used yet.
+ bw.record = func(int64) {}
+ r.Body = &bw
+ }
+
+ span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+ t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+ res, err := t.rt.RoundTrip(r)
+ if err != nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ span.End()
+ return res, err
+ }
+
+ // metrics
+ metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...)
+ if res.StatusCode > 0 {
+ metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
+ }
+ o := metric.WithAttributes(metricAttrs...)
+ t.requestBytesCounter.Add(ctx, bw.read, o)
+ // For handling response bytes we leverage a callback when the client reads the http response
+ readRecordFunc := func(n int64) {
+ t.responseBytesCounter.Add(ctx, n, o)
+ }
+
+ // traces
+ span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
+ span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+
+ res.Body = newWrappedBody(span, readRecordFunc, res.Body)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ t.latencyMeasure.Record(ctx, elapsedTime, o)
+
+ return res, err
+}
+
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser {
+	// Successful protocol switch responses will have a body that
+	// implements io.ReadWriteCloser. Ensure this interface type continues
+ // to be satisfied if that is the case.
+ if _, ok := body.(io.ReadWriteCloser); ok {
+ return &wrappedBody{span: span, record: record, body: body}
+ }
+
+ // Remove the implementation of the io.ReadWriteCloser and only implement
+ // the io.ReadCloser.
+ return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+ span trace.Span
+ recorded atomic.Bool
+ record func(n int64)
+ body io.ReadCloser
+ read atomic.Int64
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+ // This will not panic given the guard in newWrappedBody.
+ n, err := wb.body.(io.Writer).Write(p)
+ if err != nil {
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+func (wb *wrappedBody) Read(b []byte) (int, error) {
+ n, err := wb.body.Read(b)
+ // Record the number of bytes read
+ wb.read.Add(int64(n))
+
+ switch err {
+ case nil:
+ // nothing to do here but fall through to the return
+ case io.EOF:
+ wb.recordBytesRead()
+ wb.span.End()
+ default:
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+// recordBytesRead is a function that ensures the number of bytes read is recorded once and only once.
+func (wb *wrappedBody) recordBytesRead() {
+ // note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that
+ // two goroutines are racing to call this method, the number of bytes read will no longer increase. Using
+ // CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish
+ // calling wb.record(wb.read.Load()).
+ if wb.recorded.CompareAndSwap(false, true) {
+ // Record the total number of bytes read
+ wb.record(wb.read.Load())
+ }
+}
+
+func (wb *wrappedBody) Close() error {
+ wb.recordBytesRead()
+ wb.span.End()
+ if wb.body != nil {
+ return wb.body.Close()
+ }
+ return nil
+}
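+
+// Illustrative usage sketch, assuming a caller-provided ctx: the Transport
+// instruments every request made through the http.Client and propagates the
+// span context via the request headers.
+//
+//	client := http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
+//	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com/", nil)
+//	res, err := client.Do(req)
+//	if err == nil {
+//		defer res.Body.Close() // closing the body ends the client span
+//	}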
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
new file mode 100644
index 00000000000..7499f688b11
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// Version is the current release version of the otelhttp instrumentation.
+func Version() string {
+ return "0.48.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
new file mode 100644
index 00000000000..11a35ed167f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "go.opentelemetry.io/otel/propagation"
+)
+
+var _ io.ReadCloser = &bodyWrapper{}
+
+// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type bodyWrapper struct {
+ io.ReadCloser
+ record func(n int64) // must not be nil
+
+ read int64
+ err error
+}
+
+func (w *bodyWrapper) Read(b []byte) (int, error) {
+ n, err := w.ReadCloser.Read(b)
+ n1 := int64(n)
+ w.read += n1
+ w.err = err
+ w.record(n1)
+ return n, err
+}
+
+func (w *bodyWrapper) Close() error {
+ return w.ReadCloser.Close()
+}
+
+var _ http.ResponseWriter = &respWriterWrapper{}
+
+// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
+// that may be useful when using it in real life situations.
+type respWriterWrapper struct {
+ http.ResponseWriter
+ record func(n int64) // must not be nil
+
+ // used to inject the header
+ ctx context.Context
+
+ props propagation.TextMapPropagator
+
+ written int64
+ statusCode int
+ err error
+ wroteHeader bool
+}
+
+func (w *respWriterWrapper) Header() http.Header {
+ return w.ResponseWriter.Header()
+}
+
+func (w *respWriterWrapper) Write(p []byte) (int, error) {
+ if !w.wroteHeader {
+ w.WriteHeader(http.StatusOK)
+ }
+ n, err := w.ResponseWriter.Write(p)
+ n1 := int64(n)
+ w.record(n1)
+ w.written += n1
+ w.err = err
+ return n, err
+}
+
+// WriteHeader persists initial statusCode for span attribution.
+// All calls to WriteHeader will be propagated to the underlying ResponseWriter
+// and will persist the statusCode from the first call.
+// Consecutive calls are not blocked, because doing so would alter expected
+// behavior and suppress the net/http warning logs that help developers notice
+// incorrect handler implementations.
+func (w *respWriterWrapper) WriteHeader(statusCode int) {
+ if !w.wroteHeader {
+ w.wroteHeader = true
+ w.statusCode = statusCode
+ }
+ w.ResponseWriter.WriteHeader(statusCode)
+}
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
index f3355c852be..895c7664beb 100644
--- a/vendor/go.opentelemetry.io/otel/.gitignore
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -14,12 +14,9 @@ go.work.sum
gen/
/example/dice/dice
-/example/fib/fib
-/example/fib/traces.txt
-/example/jaeger/jaeger
/example/namedtracer/namedtracer
+/example/otel-collector/otel-collector
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/zipkin/zipkin
-/example/otel-collector/otel-collector
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index 6e8eeec00fa..a62511f382e 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -12,8 +12,9 @@ linters:
- depguard
- errcheck
- godot
- - gofmt
+ - gofumpt
- goimports
+ - gosec
- gosimple
- govet
- ineffassign
@@ -53,6 +54,20 @@ issues:
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
linters:
- revive
+ # It's okay to not run gosec in a test.
+ - path: _test\.go
+ linters:
+ - gosec
+    # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+ # as we commonly use it in tests and examples.
+ - text: "G404:"
+ linters:
+ - gosec
+    # Ignoring gosec G402: TLS MinVersion too low
+    # as https://pkg.go.dev/crypto/tls#Config handles the MinVersion default well.
+ - text: "G402: TLS MinVersion too low."
+ linters:
+ - gosec
include:
# revive exported should have comment or be unexported.
- EXC0012
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 3e5c35b5dcc..ea1f723e264 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,173 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+## [1.23.0] 2024-02-06
+
+This release contains the first stable, `v1`, release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
+- Experimental exemplar exporting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/EXPERIMENTAL.md#exemplars) for more information about this feature and how to enable it. (#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+ This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876)
+
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+  Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
+ It is up to the user to decide if they want to use the returned `Resource` or not.
+ It may have desired attributes overwritten or include stale semantic conventions. (#4876)
+
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/sdk/metric/metricdata`. (#4827)
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+ The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+ The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+ The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+ The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/EXPERIMENTAL.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`.
+  If you do not want to make a measurement when the context is canceled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that requires escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
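As an illustration of the migration described above, here is a minimal sketch (not part of this diff; the `customProvider` type is hypothetical) of a custom `TracerProvider` that satisfies the expanded interface by embedding the new `embedded.TracerProvider` type and delegating everything else to the no-op implementation:

```go
package custom

import (
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
	"go.opentelemetry.io/otel/trace/noop"
)

// customProvider compiles against the expanded v1.20.0+ trace API because it
// embeds embedded.TracerProvider; unimplemented behavior falls back to no-op.
type customProvider struct {
	embedded.TracerProvider
}

// Tracer delegates to the no-op tracer in this sketch.
func (customProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	return noop.NewTracerProvider().Tracer(name, opts...)
}

// Compile-time check that the expanded interface is satisfied.
var _ trace.TracerProvider = customProvider{}
```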
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+ Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the `metric.Producer` interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+ Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+  This extends the `Tracer` interface and is a breaking change for any existing implementation.
+ Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+  This extends the `Span` interface and is a breaking change for any existing implementation.
+ Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
+
+### Fixed
+
+- Fix improper parsing of characters such as `+` and `/` by `Parse` in `go.opentelemetry.io/otel/baggage`, as they were rendered as whitespace. (#4667)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, as they were rendered as whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
+
## [1.19.0/0.42.0/0.0.7] 2023-09-28
This release contains the first stable release of the OpenTelemetry Go [metric SDK].
@@ -2656,7 +2823,12 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.23.0...HEAD
+[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
+[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
+[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
+[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
+[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
@@ -2731,7 +2903,7 @@ It contains api and sdk for trace and meter.
[Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18
-[Go 1.19]: https://go.dev/doc/go1.19
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
+[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 623740007d4..31d336d9222 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -14,4 +14,4 @@
* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
-CODEOWNERS @MrAlias @MadVikingGod @pellared
\ No newline at end of file
+CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index a00dbca7b08..c9f2bac55bf 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -90,6 +90,10 @@ git push
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
request ID to the entry you added to `CHANGELOG.md`.
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
### How to Receive Comments
* If the PR is not ready for review, please put `[WIP]` in the title,
@@ -587,25 +591,46 @@ this.
[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore the cancellation of the context that is
+passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
+Recording methods should not return an error describing the cancellation state of the context
+when they complete, nor should they abort any work.
+
+This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
+the method. In that case the context cancellation can be used for the timeout with the
+restriction that this behavior is documented for the method. Otherwise, timeouts
+are expected to be handled by the user calling the API, not the implementation.
+
+Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
+of a provider. It is assumed the context passed from a user is not used for this purpose.
+
+Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
+force flushing telemetry, shutting down a signal provider) the context cancellation
+should be honored. This means all work done on behalf of the user provided context
+should be canceled.
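A minimal sketch of this rule (not part of the diff; the `histogram` type and its field are illustrative) shows a synchronous recording method that records unconditionally instead of checking the passed context:

```go
package sketch

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/embedded"
)

// histogram is a hypothetical instrument implementation.
type histogram struct {
	embedded.Float64Histogram

	values []float64
}

// Record ignores cancellation of ctx: the measurement is made unconditionally
// and no error about the context state is surfaced to the caller. The context
// is still available for the values it carries (e.g. baggage), just not for
// aborting the recording.
func (h *histogram) Record(ctx context.Context, v float64, _ ...metric.RecordOption) {
	// Deliberately no `if ctx.Err() != nil { return }` check here.
	h.values = append(h.values, v)
}

// Compile-time check against the metric API.
var _ metric.Float64Histogram = (*histogram)(nil)
```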
+
## Approvers and Maintainers
### Approvers
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
-- [David Ashpole](https://github.com/dashpole), Google
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Elastic
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
### Maintainers
+- [David Ashpole](https://github.com/dashpole), Google
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Robert PajÄ…k](https://github.com/pellared), Splunk
- [Tyler Yahn](https://github.com/MrAlias), Splunk
### Emeritus
+- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Josh MacDonald](https://github.com/jmacd), LightStep
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 5c311706b0c..35fc189961b 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -77,6 +77,9 @@ $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
GORELEASE = $(TOOLS)/gorelease
$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
+GOVULNCHECK = $(TOOLS)/govulncheck
+$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
+
.PHONY: tools
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
@@ -189,6 +192,18 @@ test-coverage: | $(GOCOVMERGE)
done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
+# Adding a directory will include all benchmarks in that directory if a filter is not specified.
+BENCHMARK_TARGETS := sdk/trace
+.PHONY: benchmark
+benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
+BENCHMARK_FILTER = .
+# You can override the filter for a particular directory by adding a rule here.
+benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
+benchmark/%:
+ @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
+ && cd $* \
+ $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
+
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint
@@ -216,7 +231,7 @@ go-mod-tidy/%: | crosslink
lint-modules: go-mod-tidy
.PHONY: lint
-lint: misspell lint-modules golangci-lint
+lint: misspell lint-modules golangci-lint govulncheck
.PHONY: vanity-import-check
vanity-import-check: | $(PORTO)
@@ -226,6 +241,14 @@ vanity-import-check: | $(PORTO)
misspell: | $(MISSPELL)
@$(MISSPELL) -w $(ALL_DOCS)
+.PHONY: govulncheck
+govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
+govulncheck/%: DIR=$*
+govulncheck/%: | $(GOVULNCHECK)
+ @echo "govulncheck ./... in $(DIR)" \
+ && cd $(DIR) \
+ && $(GOVULNCHECK) ./...
+
.PHONY: codespell
codespell: | $(CODESPELL)
@$(DOCKERPY) $(CODESPELL)
@@ -289,3 +312,7 @@ COMMIT ?= "HEAD"
add-tags: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
+
+.PHONY: lint-markdown
+lint-markdown:
+ docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 634326ef833..44e1bfc9b5e 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -11,16 +11,13 @@ It provides a set of APIs to directly measure performance and behavior of your s
## Project Status
-| Signal | Status | Project |
-|---------|------------|-----------------------|
-| Traces | Stable | N/A |
-| Metrics | Mixed [1] | [Go: Metric SDK (GA)] |
-| Logs | Frozen [2] | N/A |
+| Signal | Status |
+|---------|------------|
+| Traces | Stable |
+| Metrics | Stable |
+| Logs | Design [1] |
-[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34
-
-- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta.
-- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK.
+- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
No Logs Pull Requests are currently being accepted.
Progress and status specific to this repository is tracked in our
@@ -69,7 +66,7 @@ are made for those systems currently.
## Getting Started
-You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
OpenTelemetry's goal is to provide a single set of APIs to capture distributed
traces and metrics from your application and send them to an observability
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 82ce3ee46a1..d2691d0bd8b 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -123,12 +123,12 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
### Website Documentation
-Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
-[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
-[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
### Demo Repository
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 9f9303d4f15..fb6da51450c 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -279,52 +279,75 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S
position--
kvs[offset], kvs[position] = kvs[position], kvs[offset]
}
+ kvs = kvs[position:]
+
if filter != nil {
- return filterSet(kvs[position:], filter)
+ if div := filteredToFront(kvs, filter); div != 0 {
+ return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+ }
}
- return Set{
- equivalent: computeDistinct(kvs[position:]),
- }, nil
+ return Set{equivalent: computeDistinct(kvs)}, nil
}
-// filterSet reorders kvs so that included keys are contiguous at the end of
-// the slice, while excluded keys precede the included keys.
-func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
- var excluded []KeyValue
-
- // Move attributes that do not match the filter so they're adjacent before
- // calling computeDistinct().
- distinctPosition := len(kvs)
-
- // Swap indistinct keys forward and distinct keys toward the
- // end of the slice.
- offset := len(kvs) - 1
- for ; offset >= 0; offset-- {
- if filter(kvs[offset]) {
- distinctPosition--
- kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
- continue
+// filteredToFront filters slice in-place using keep function. All KeyValues that need to
+// be removed are moved to the front. All KeyValues that need to be kept are
+// moved (in-order) to the back. The index for the first KeyValue to be kept is
+// returned.
+func filteredToFront(slice []KeyValue, keep Filter) int {
+ n := len(slice)
+ j := n
+ for i := n - 1; i >= 0; i-- {
+ if keep(slice[i]) {
+ j--
+ slice[i], slice[j] = slice[j], slice[i]
}
}
- excluded = kvs[:distinctPosition]
-
- return Set{
- equivalent: computeDistinct(kvs[distinctPosition:]),
- }, excluded
+ return j
}
// Filter returns a filtered copy of this Set. See the documentation for
// NewSetWithSortableFiltered for more details.
func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if re == nil {
- return Set{
- equivalent: l.equivalent,
- }, nil
+ return *l, nil
}
- // Note: This could be refactored to avoid the temporary slice
- // allocation, if it proves to be expensive.
- return filterSet(l.ToSlice(), re)
+ // Iterate in reverse to the first attribute that will be filtered out.
+ n := l.Len()
+ first := n - 1
+ for ; first >= 0; first-- {
+ kv, _ := l.Get(first)
+ if !re(kv) {
+ break
+ }
+ }
+
+ // No attributes will be dropped, return the immutable Set l and nil.
+ if first < 0 {
+ return *l, nil
+ }
+
+ // Copy now that we know we need to return a modified set.
+ //
+ // Do not do this in-place on the underlying storage of *Set l. Sets are
+ // immutable and filtering should not change this.
+ slice := l.ToSlice()
+
+ // Don't re-iterate the slice if only slice[0] is filtered.
+ if first == 0 {
+ // It is safe to assume len(slice) >= 1 given we found at least one
+ // attribute above that needs to be filtered out.
+ return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
+ }
+
+ // Move the filtered slice[first] to the front (preserving order).
+ kv := slice[first]
+ copy(slice[1:first+1], slice[:first])
+ slice[0] = kv
+
+ // Do not re-evaluate re(slice[first+1:]).
+ div := filteredToFront(slice[1:first+1], re) + 1
+ return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
}
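A small usage sketch of `Filter` (not part of the diff): when the predicate keeps every attribute, the rewritten method above returns the original immutable `Set` without allocating; otherwise it returns a filtered copy plus the dropped attributes, as below.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("service", "api"),
		attribute.Int("status", 200),
	)

	// Keep everything except the "status" attribute.
	kept, dropped := set.Filter(func(kv attribute.KeyValue) bool {
		return kv.Key != "status"
	})

	fmt.Println(kept.Encoded(attribute.DefaultEncoder())) // service=api
	fmt.Println(len(dropped))                             // 1
}
```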
// computeDistinct returns a Distinct using either the fixed- or
@@ -404,7 +427,7 @@ func (l *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(l.equivalent.iface)
}
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+// MarshalLog is the marshaling function used by the logging system to represent this Set.
func (l Set) MarshalLog() interface{} {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 9e6b3b7b52a..7d27cf77d5c 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"net/url"
- "regexp"
"strings"
"go.opentelemetry.io/otel/internal/baggage"
@@ -32,16 +31,6 @@ const (
listDelimiter = ","
keyValueDelimiter = "="
propertyDelimiter = ";"
-
- keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
- valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
- keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
-)
-
-var (
- keyRe = regexp.MustCompile(`^` + keyDef + `$`)
- valueRe = regexp.MustCompile(`^` + valueDef + `$`)
- propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
)
var (
@@ -67,7 +56,7 @@ type Property struct {
//
// If key is invalid, an error will be returned.
func NewKeyProperty(key string) (Property, error) {
- if !keyRe.MatchString(key) {
+ if !validateKey(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
@@ -77,14 +66,29 @@ func NewKeyProperty(key string) (Property, error) {
// NewKeyValueProperty returns a new Property for key with value.
//
-// If key or value are invalid, an error will be returned.
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead
+// that does not require percent-encoding of the value.
func NewKeyValueProperty(key, value string) (Property, error) {
- if !keyRe.MatchString(key) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ if !validateValue(value) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
- if !valueRe.MatchString(value) {
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
+ return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be compliant with W3C Baggage specification.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+ if !validateKey(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
p := Property{
key: key,
@@ -106,20 +110,11 @@ func parseProperty(property string) (Property, error) {
return newInvalidProperty(), nil
}
- match := propertyRe.FindStringSubmatch(property)
- if len(match) != 4 {
+ p, ok := parsePropertyInternal(property)
+ if !ok {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
}
- var p Property
- if match[1] != "" {
- p.key = match[1]
- } else {
- p.key = match[2]
- p.value = match[3]
- p.hasValue = true
- }
-
return p, nil
}
@@ -130,12 +125,9 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err)
}
- if !keyRe.MatchString(p.key) {
+ if !validateKey(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
- if p.hasValue && !valueRe.MatchString(p.value) {
- return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
- }
if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value"))
}
@@ -154,11 +146,11 @@ func (p Property) Value() (string, bool) {
return p.value, p.hasValue
}
-// String encodes Property into a string compliant with the W3C Baggage
+// String encodes Property into a header string compliant with the W3C Baggage
// specification.
func (p Property) String() string {
if p.hasValue {
- return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
+ return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
}
return p.key
}
@@ -218,7 +210,7 @@ func (p properties) validate() error {
return nil
}
-// String encodes properties into a string compliant with the W3C Baggage
+// String encodes properties into a header string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
props := make([]string, len(p))
@@ -240,11 +232,28 @@ type Member struct {
hasData bool
}
-// NewMember returns a new Member from the passed arguments. The key will be
-// used directly while the value will be url decoded after validation. An error
-// is returned if the created Member would be invalid according to the W3C
-// Baggage specification.
+// NewMember returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewMemberRaw] instead
+// that does not require percent-encoding of the value.
func NewMember(key, value string, props ...Property) (Member, error) {
+ if !validateValue(value) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ return NewMemberRaw(key, decodedValue, props...)
+}
+
+// NewMemberRaw returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with W3C Baggage specification.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
m := Member{
key: key,
value: value,
@@ -254,11 +263,6 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
- decodedValue, err := url.QueryUnescape(value)
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
- m.value = decodedValue
return m, nil
}
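A short usage sketch of the split between the two constructors (not part of the diff): `NewMember` keeps accepting a percent-encoded value, while `NewMemberRaw` takes the raw string; `String` percent-encodes on output in both cases.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Value is already percent-encoded, as required by NewMember.
	encoded, _ := baggage.NewMember("key", "a%20value")

	// Raw value; no pre-encoding needed with NewMemberRaw.
	raw, _ := baggage.NewMemberRaw("key", "a value")

	fmt.Println(encoded.Value()) // a value
	fmt.Println(raw.String())    // key=a%20value
}
```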
@@ -274,11 +278,7 @@ func parseMember(member string) (Member, error) {
return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
}
- var (
- key, value string
- props properties
- )
-
+ var props properties
keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found {
// Parse the member properties.
@@ -299,36 +299,34 @@ func parseMember(member string) (Member, error) {
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
- key = strings.TrimSpace(k)
- var err error
- value, err = url.QueryUnescape(strings.TrimSpace(v))
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
- }
- if !keyRe.MatchString(key) {
+ key := strings.TrimSpace(k)
+ if !validateKey(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
- if !valueRe.MatchString(value) {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+
+ val := strings.TrimSpace(v)
+ if !validateValue(val) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
}
+	// Decode a percent-encoded value.
+ value, err := url.PathUnescape(val)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
+ }
return Member{key: key, value: value, properties: props, hasData: true}, nil
}
// validate ensures m conforms to the W3C Baggage specification.
-// A key is just an ASCII string, but a value must be URL encoded UTF-8,
-// returning an error otherwise.
+// A key must be an ASCII string, returning an error otherwise.
func (m Member) validate() error {
if !m.hasData {
return fmt.Errorf("%w: %q", errInvalidMember, m)
}
- if !keyRe.MatchString(m.key) {
+ if !validateKey(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
- if !valueRe.MatchString(m.value) {
- return fmt.Errorf("%w: %q", errInvalidValue, m.value)
- }
return m.properties.validate()
}
@@ -341,11 +339,13 @@ func (m Member) Value() string { return m.value }
// Properties returns a copy of the Member properties.
func (m Member) Properties() []Property { return m.properties.Copy() }
-// String encodes Member into a string compliant with the W3C Baggage
+// String encodes Member into a header string compliant with the W3C Baggage
// specification.
func (m Member) String() string {
- // A key is just an ASCII string, but a value is URL encoded UTF-8.
- s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
+ // A key is just an ASCII string. A value is restricted to be
+ // US-ASCII characters excluding CTLs, whitespace,
+ // DQUOTE, comma, semicolon, and backslash.
+ s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
if len(m.properties) > 0 {
s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
}
@@ -536,9 +536,8 @@ func (b Baggage) Len() int {
return len(b.list)
}
-// String encodes Baggage into a string compliant with the W3C Baggage
-// specification. The returned string will be invalid if the Baggage contains
-// any invalid list-members.
+// String encodes Baggage into a header string compliant with the W3C Baggage
+// specification.
func (b Baggage) String() string {
members := make([]string, 0, len(b.list))
for k, v := range b.list {
@@ -550,3 +549,196 @@ func (b Baggage) String() string {
}
return strings.Join(members, listDelimiter)
}
+
+// parsePropertyInternal attempts to decode a Property from the passed string.
+// It follows the spec at https://www.w3.org/TR/baggage/#definition.
+func parsePropertyInternal(s string) (p Property, ok bool) {
+ // For the entire function we will use " key = value " as an example.
+ // Attempting to parse the key.
+ // First skip spaces at the beginning "< >key = value " (they could be empty).
+ index := skipSpace(s, 0)
+
+ // Parse the key: " = value ".
+ keyStart := index
+ keyEnd := index
+ for _, c := range s[keyStart:] {
+ if !validateKeyChar(c) {
+ break
+ }
+ keyEnd++
+ }
+
+ // If we couldn't find any valid key character,
+ // it means the key is either empty or invalid.
+ if keyStart == keyEnd {
+ return
+ }
+
+ // Skip spaces after the key: " key< >= value ".
+ index = skipSpace(s, keyEnd)
+
+ if index == len(s) {
+ // A key can have no value, like: " key ".
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ return
+ }
+
+ // If we have not reached the end and we can't find the '=' delimiter,
+ // it means the property is invalid.
+ if s[index] != keyValueDelimiter[0] {
+ return
+ }
+
+ // Attempting to parse the value.
+ // Match: " key =< >value ".
+ index = skipSpace(s, index+1)
+
+ // Match the value string: " key = ".
+ // A valid property can be: " key =".
+ // Therefore, we don't have to check if the value is empty.
+ valueStart := index
+ valueEnd := index
+ for _, c := range s[valueStart:] {
+ if !validateValueChar(c) {
+ break
+ }
+ valueEnd++
+ }
+
+ // Skip all trailing whitespaces: " key = value< >".
+ index = skipSpace(s, valueEnd)
+
+ // If after looking for the value and skipping whitespaces
+ // we have not reached the end, it means the property is
+ // invalid, something like: " key = value value1".
+ if index != len(s) {
+ return
+ }
+
+	// Decode a percent-encoded value.
+ value, err := url.PathUnescape(s[valueStart:valueEnd])
+ if err != nil {
+ return
+ }
+
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ p.hasValue = true
+
+ p.value = value
+ return
+}
+
+func skipSpace(s string, offset int) int {
+ i := offset
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c != ' ' && c != '\t' {
+ break
+ }
+ }
+ return i
+}
+
+func validateKey(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ for _, c := range s {
+ if !validateKeyChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func validateKeyChar(c int32) bool {
+ return (c >= 0x23 && c <= 0x27) ||
+ (c >= 0x30 && c <= 0x39) ||
+ (c >= 0x41 && c <= 0x5a) ||
+ (c >= 0x5e && c <= 0x7a) ||
+ c == 0x21 ||
+ c == 0x2a ||
+ c == 0x2b ||
+ c == 0x2d ||
+ c == 0x2e ||
+ c == 0x7c ||
+ c == 0x7e
+}
+
+func validateValue(s string) bool {
+ for _, c := range s {
+ if !validateValueChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func validateValueChar(c int32) bool {
+ return c == 0x21 ||
+ (c >= 0x23 && c <= 0x2b) ||
+ (c >= 0x2d && c <= 0x3a) ||
+ (c >= 0x3c && c <= 0x5b) ||
+ (c >= 0x5d && c <= 0x7e)
+}
+
+// valueEscape escapes the string so it can be safely placed inside a baggage value,
+// replacing special characters with %XX sequences as needed.
+//
+// The implementation is based on:
+// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
+func valueEscape(s string) string {
+ hexCount := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return s
+ }
+
+ var buf [64]byte
+ var t []byte
+
+ required := len(s) + 2*hexCount
+ if required <= len(buf) {
+ t = buf[:required]
+ } else {
+ t = make([]byte, required)
+ }
+
+ j := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(s[i]) {
+ const upperhex = "0123456789ABCDEF"
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ } else {
+ t[j] = c
+ j++
+ }
+ }
+
+ return string(t)
+}
+
+// shouldEscape returns true if the specified byte should be escaped when
+// appearing in a baggage value string.
+func shouldEscape(c byte) bool {
+ if c == '%' {
+ // The percent character must be encoded so that percent-encoding can work.
+ return true
+ }
+ return !validateValueChar(int32(c))
+}
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
index daa36c89dc6..36d7c24e88e 100644
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -22,7 +22,7 @@ transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
exporters are used to process and transport this data.
-To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/.
+To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/.
To read more about tracing, see go.opentelemetry.io/otel/trace.
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
index a33eded872a..ebb13c20678 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -34,11 +34,13 @@ type afCounter struct {
name string
opts []metric.Float64ObservableCounterOption
- delegate atomic.Value //metric.Float64ObservableCounter
+ delegate atomic.Value // metric.Float64ObservableCounter
}
-var _ unwrapper = (*afCounter)(nil)
-var _ metric.Float64ObservableCounter = (*afCounter)(nil)
+var (
+ _ unwrapper = (*afCounter)(nil)
+ _ metric.Float64ObservableCounter = (*afCounter)(nil)
+)
func (i *afCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
@@ -63,11 +65,13 @@ type afUpDownCounter struct {
name string
opts []metric.Float64ObservableUpDownCounterOption
- delegate atomic.Value //metric.Float64ObservableUpDownCounter
+ delegate atomic.Value // metric.Float64ObservableUpDownCounter
}
-var _ unwrapper = (*afUpDownCounter)(nil)
-var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
+var (
+ _ unwrapper = (*afUpDownCounter)(nil)
+ _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
+)
func (i *afUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
@@ -92,11 +96,13 @@ type afGauge struct {
name string
opts []metric.Float64ObservableGaugeOption
- delegate atomic.Value //metric.Float64ObservableGauge
+ delegate atomic.Value // metric.Float64ObservableGauge
}
-var _ unwrapper = (*afGauge)(nil)
-var _ metric.Float64ObservableGauge = (*afGauge)(nil)
+var (
+ _ unwrapper = (*afGauge)(nil)
+ _ metric.Float64ObservableGauge = (*afGauge)(nil)
+)
func (i *afGauge) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
@@ -121,11 +127,13 @@ type aiCounter struct {
name string
opts []metric.Int64ObservableCounterOption
- delegate atomic.Value //metric.Int64ObservableCounter
+ delegate atomic.Value // metric.Int64ObservableCounter
}
-var _ unwrapper = (*aiCounter)(nil)
-var _ metric.Int64ObservableCounter = (*aiCounter)(nil)
+var (
+ _ unwrapper = (*aiCounter)(nil)
+ _ metric.Int64ObservableCounter = (*aiCounter)(nil)
+)
func (i *aiCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
@@ -150,11 +158,13 @@ type aiUpDownCounter struct {
name string
opts []metric.Int64ObservableUpDownCounterOption
- delegate atomic.Value //metric.Int64ObservableUpDownCounter
+ delegate atomic.Value // metric.Int64ObservableUpDownCounter
}
-var _ unwrapper = (*aiUpDownCounter)(nil)
-var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
+var (
+ _ unwrapper = (*aiUpDownCounter)(nil)
+ _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
+)
func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
@@ -179,11 +189,13 @@ type aiGauge struct {
name string
opts []metric.Int64ObservableGaugeOption
- delegate atomic.Value //metric.Int64ObservableGauge
+ delegate atomic.Value // metric.Int64ObservableGauge
}
-var _ unwrapper = (*aiGauge)(nil)
-var _ metric.Int64ObservableGauge = (*aiGauge)(nil)
+var (
+ _ unwrapper = (*aiGauge)(nil)
+ _ metric.Int64ObservableGauge = (*aiGauge)(nil)
+)
func (i *aiGauge) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
@@ -208,7 +220,7 @@ type sfCounter struct {
name string
opts []metric.Float64CounterOption
- delegate atomic.Value //metric.Float64Counter
+ delegate atomic.Value // metric.Float64Counter
}
var _ metric.Float64Counter = (*sfCounter)(nil)
@@ -234,7 +246,7 @@ type sfUpDownCounter struct {
name string
opts []metric.Float64UpDownCounterOption
- delegate atomic.Value //metric.Float64UpDownCounter
+ delegate atomic.Value // metric.Float64UpDownCounter
}
var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
@@ -260,7 +272,7 @@ type sfHistogram struct {
name string
opts []metric.Float64HistogramOption
- delegate atomic.Value //metric.Float64Histogram
+ delegate atomic.Value // metric.Float64Histogram
}
var _ metric.Float64Histogram = (*sfHistogram)(nil)
@@ -286,7 +298,7 @@ type siCounter struct {
name string
opts []metric.Int64CounterOption
- delegate atomic.Value //metric.Int64Counter
+ delegate atomic.Value // metric.Int64Counter
}
var _ metric.Int64Counter = (*siCounter)(nil)
@@ -312,7 +324,7 @@ type siUpDownCounter struct {
name string
opts []metric.Int64UpDownCounterOption
- delegate atomic.Value //metric.Int64UpDownCounter
+ delegate atomic.Value // metric.Int64UpDownCounter
}
var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
@@ -338,7 +350,7 @@ type siHistogram struct {
name string
opts []metric.Int64HistogramOption
- delegate atomic.Value //metric.Int64Histogram
+ delegate atomic.Value // metric.Int64Histogram
}
var _ metric.Int64Histogram = (*siHistogram)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
index 5f008d0982b..3f61ec12a34 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -39,6 +39,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
)
// tracerProvider is a placeholder for a configured SDK TracerProvider.
@@ -46,6 +47,8 @@ import (
// All TracerProvider functionality is forwarded to a delegate once
// configured.
type tracerProvider struct {
+ embedded.TracerProvider
+
mtx sync.Mutex
tracers map[il]*tracer
delegate trace.TracerProvider
@@ -119,6 +122,8 @@ type il struct {
// All Tracer functionality is forwarded to a delegate once configured.
// Otherwise, all functionality is forwarded to a NoopTracer.
type tracer struct {
+ embedded.Tracer
+
name string
opts []trace.TracerOption
provider *tracerProvider
@@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart
// SpanContext. It performs no operations other than to return the wrapped
// SpanContext.
type nonRecordingSpan struct {
+ embedded.Span
+
sc trace.SpanContext
tracer *tracer
}
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
index ae24e448d91..54716e13b35 100644
--- a/vendor/go.opentelemetry.io/otel/metric/doc.go
+++ b/vendor/go.opentelemetry.io/otel/metric/doc.go
@@ -149,7 +149,7 @@ of [go.opentelemetry.io/otel/metric].
Finally, an author can embed another implementation in theirs. The embedded
implementation will be used for methods not defined by the author. For example,
-an author who want to default to silently dropping the call can use
+an author who wants to default to silently dropping the call can use
[go.opentelemetry.io/otel/metric/noop]:
import "go.opentelemetry.io/otel/metric/noop"
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
index cdca00058c6..be89cd53341 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -39,6 +39,12 @@ type InstrumentOption interface {
Float64ObservableGaugeOption
}
+// HistogramOption applies options to histogram instruments.
+type HistogramOption interface {
+ Int64HistogramOption
+ Float64HistogramOption
+}
+
type descOpt string
func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
@@ -171,6 +177,23 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob
// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
func WithUnit(u string) InstrumentOption { return unitOpt(u) }
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+ c.explicitBucketBoundaries = o
+ return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+ c.explicitBucketBoundaries = o
+ return c
+}
+
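A brief usage sketch (not part of the diff; the instrument name and boundaries are illustrative): the option is advisory, so an SDK is free to ignore the requested buckets.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example")

	latency, err := meter.Float64Histogram(
		"request.duration",
		metric.WithUnit("s"),
		metric.WithExplicitBucketBoundaries(0.01, 0.05, 0.1, 0.5, 1, 5),
	)
	if err != nil {
		panic(err)
	}

	latency.Record(context.Background(), 0.042)
}
```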
// AddOption applies options to an addition measurement. See
// [MeasurementOption] for other options that can be used as an AddOption.
type AddOption interface {
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 00000000000..acc9a670b22
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,264 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ metric.MeterProvider = MeterProvider{}
+ _ metric.Meter = Meter{}
+ _ metric.Observer = Observer{}
+ _ metric.Registration = Registration{}
+ _ metric.Int64Counter = Int64Counter{}
+ _ metric.Float64Counter = Float64Counter{}
+ _ metric.Int64UpDownCounter = Int64UpDownCounter{}
+ _ metric.Float64UpDownCounter = Float64UpDownCounter{}
+ _ metric.Int64Histogram = Int64Histogram{}
+ _ metric.Float64Histogram = Float64Histogram{}
+ _ metric.Int64ObservableCounter = Int64ObservableCounter{}
+ _ metric.Float64ObservableCounter = Float64ObservableCounter{}
+ _ metric.Int64ObservableGauge = Int64ObservableGauge{}
+ _ metric.Float64ObservableGauge = Float64ObservableGauge{}
+ _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
+ _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+ _ metric.Int64Observer = Int64Observer{}
+ _ metric.Float64Observer = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+ return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+ return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+ return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ return Int64Histogram{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+ return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+ return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+ return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ return Float64Histogram{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+ return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+ return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+ return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+ return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback, it performing no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including hold any record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+ metric.Int64Observable
+ embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+ metric.Float64Observable
+ embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
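A minimal usage sketch of this package (not part of the diff): installing the no-op MeterProvider globally effectively disables metrics while leaving instrumentation code unchanged.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	otel.SetMeterProvider(noop.NewMeterProvider())

	// Instruments obtained from the global provider now record nothing.
	counter, _ := otel.Meter("example").Int64Counter("requests")
	counter.Add(context.Background(), 1) // no-op
}
```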
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
index f0b063721d8..0a4825ae6a7 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -147,8 +147,9 @@ type Float64Histogram interface {
// Float64HistogramConfig contains options for synchronous histogram instruments
// that record float64 values.
type Float64HistogramConfig struct {
- description string
- unit string
+ description string
+ unit string
+ explicitBucketBoundaries []float64
}
// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
@@ -171,6 +172,11 @@ func (c Float64HistogramConfig) Unit() string {
return c.unit
}
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+ return c.explicitBucketBoundaries
+}
+
// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
// [InstrumentOption] for other options that can be used as a
// Float64HistogramOption.
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
index 6f508eb66d4..56667d32fc0 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -147,8 +147,9 @@ type Int64Histogram interface {
// Int64HistogramConfig contains options for synchronous histogram instruments
// that record int64 values.
type Int64HistogramConfig struct {
- description string
- unit string
+ description string
+ unit string
+ explicitBucketBoundaries []float64
}
// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
@@ -171,6 +172,11 @@ func (c Int64HistogramConfig) Unit() string {
return c.unit
}
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+ return c.explicitBucketBoundaries
+}
+
// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
// [InstrumentOption] for other options that can be used as an
// Int64HistogramOption.
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 902692da082..63e5d62221f 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -18,7 +18,7 @@ import (
"context"
"encoding/hex"
"fmt"
- "regexp"
+ "strings"
"go.opentelemetry.io/otel/trace"
)
@@ -28,6 +28,7 @@ const (
maxVersion = 254
traceparentHeader = "traceparent"
tracestateHeader = "tracestate"
+ delimiter = "-"
)
// TraceContext is a propagator that supports the W3C Trace Context format
@@ -40,8 +41,10 @@ const (
// their proprietary information.
type TraceContext struct{}
-var _ TextMapPropagator = TraceContext{}
-var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+var (
+ _ TextMapPropagator = TraceContext{}
+ versionPart = fmt.Sprintf("%.2X", supportedVersion)
+)
// Inject sets tracecontext from the Context into the carrier.
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
@@ -57,12 +60,19 @@ func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
// Clear all flags other than the trace-context supported sampling bit.
flags := sc.TraceFlags() & trace.FlagsSampled
- h := fmt.Sprintf("%.2x-%s-%s-%s",
- supportedVersion,
- sc.TraceID(),
- sc.SpanID(),
- flags)
- carrier.Set(traceparentHeader, h)
+ var sb strings.Builder
+ sb.Grow(2 + 32 + 16 + 2 + 3)
+ _, _ = sb.WriteString(versionPart)
+ traceID := sc.TraceID()
+ spanID := sc.SpanID()
+ flagByte := [1]byte{byte(flags)}
+ var buf [32]byte
+ for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+ _ = sb.WriteByte(delimiter[0])
+ n := hex.Encode(buf[:], src)
+ _, _ = sb.Write(buf[:n])
+ }
+ carrier.Set(traceparentHeader, sb.String())
}
// Extract reads tracecontext from the carrier into a returned Context.
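For reference, the rewritten Inject composes the W3C traceparent header (`version-traceid-spanid-flags`) by hex-encoding each part into a strings.Builder instead of calling fmt.Sprintf. A standalone, stdlib-only sketch of the same construction (illustrative only, not the vendored code):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	traceID := [16]byte{0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36}
	spanID := [8]byte{0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7}
	flags := byte(0x01) // sampled

	var sb strings.Builder
	sb.Grow(2 + 32 + 16 + 2 + 3) // version + trace-id + span-id + flags + three '-' delimiters
	sb.WriteString("00")         // version, as computed in versionPart above
	var buf [32]byte
	for _, src := range [][]byte{traceID[:], spanID[:], {flags}} {
		sb.WriteByte('-')
		n := hex.Encode(buf[:], src)
		sb.Write(buf[:n])
	}
	fmt.Println(sb.String())
	// 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
}
```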
@@ -84,21 +94,8 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return trace.SpanContext{}
}
- matches := traceCtxRegExp.FindStringSubmatch(h)
-
- if len(matches) == 0 {
- return trace.SpanContext{}
- }
-
- if len(matches) < 5 { // four subgroups plus the overall match
- return trace.SpanContext{}
- }
-
- if len(matches[1]) != 2 {
- return trace.SpanContext{}
- }
- ver, err := hex.DecodeString(matches[1])
- if err != nil {
+ var ver [1]byte
+ if !extractPart(ver[:], &h, 2) {
return trace.SpanContext{}
}
version := int(ver[0])
@@ -106,36 +103,24 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return trace.SpanContext{}
}
- if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
- return trace.SpanContext{}
- }
-
- if len(matches[2]) != 32 {
- return trace.SpanContext{}
- }
-
var scc trace.SpanContextConfig
-
- scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
- if err != nil {
+ if !extractPart(scc.TraceID[:], &h, 32) {
return trace.SpanContext{}
}
-
- if len(matches[3]) != 16 {
- return trace.SpanContext{}
- }
- scc.SpanID, err = trace.SpanIDFromHex(matches[3])
- if err != nil {
+ if !extractPart(scc.SpanID[:], &h, 16) {
return trace.SpanContext{}
}
- if len(matches[4]) != 2 {
+ var opts [1]byte
+ if !extractPart(opts[:], &h, 2) {
return trace.SpanContext{}
}
- opts, err := hex.DecodeString(matches[4])
- if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
+ if version == 0 && (h != "" || opts[0] > 2) {
+ // Version 0 does not allow trailing data after the flags,
+ // nor flag values greater than 2.
return trace.SpanContext{}
}
+
// Clear all flags other than the trace-context supported sampling bit.
scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
@@ -153,6 +138,29 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
return sc
}
+// upperHex reports whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+ for _, c := range v {
+ if c >= 'A' && c <= 'F' {
+ return true
+ }
+ }
+ return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+ part, left, _ := strings.Cut(*h, delimiter)
+ *h = left
+ // hex.Decode accepts upper-case characters, which traceparent does not allow, so reject them explicitly.
+ if len(part) != n || upperHex(part) {
+ return false
+ }
+ if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+ return false
+ }
+ return true
+}
+
// Fields returns the keys whose values are set with Inject.
func (tc TraceContext) Fields() []string {
return []string{traceparentHeader, tracestateHeader}
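The extract path now walks the header with strings.Cut instead of a regular expression; each fixed-width, lower-case hex field is decoded in place. An illustrative stand-alone parser in the same spirit (not the vendored function itself):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// parsePart cuts the next "-"-delimited field, checks its width and that it
// is lower-case hex, and decodes it into dst.
func parsePart(dst []byte, h *string, n int) bool {
	part, rest, _ := strings.Cut(*h, "-")
	*h = rest
	if len(part) != n || strings.ToLower(part) != part {
		return false
	}
	p, err := hex.Decode(dst, []byte(part))
	return err == nil && p == n/2
}

func main() {
	h := "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
	var ver, flags [1]byte
	var traceID [16]byte
	var spanID [8]byte
	ok := parsePart(ver[:], &h, 2) &&
		parsePart(traceID[:], &h, 32) &&
		parsePart(spanID[:], &h, 16) &&
		parsePart(flags[:], &h, 2)
	fmt.Println(ok, hex.EncodeToString(traceID[:]), flags[0]&0x01 == 1)
	// true 4bf92f3577b34da6a3ce929d0e0e4736 true
}
```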
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
index ddff454685c..e0a43e13840 100644
--- a/vendor/go.opentelemetry.io/otel/requirements.txt
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -1 +1 @@
-codespell==2.2.5
+codespell==2.2.6
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
index 324dd4baf24..4279013be88 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -21,12 +21,10 @@ import (
"strings"
)
-var (
- // ErrPartialResource is returned by a detector when complete source
- // information for a Resource is unavailable or the source information
- // contains invalid values that are omitted from the returned Resource.
- ErrPartialResource = errors.New("partial resource")
-)
+// ErrPartialResource is returned by a detector when complete source
+// information for a Resource is unavailable or the source information
+// contains invalid values that are omitted from the returned Resource.
+var ErrPartialResource = errors.New("partial resource")
// Detector detects OpenTelemetry resource information.
type Detector interface {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index a847c50622e..e29ae563a69 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -28,16 +28,14 @@ import (
const (
// resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
- resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES"
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
// svcNameKey is the environment variable name that Service Name information will be read from.
svcNameKey = "OTEL_SERVICE_NAME"
)
-var (
- // errMissingValue is returned when a resource value is missing.
- errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
-)
+// errMissingValue is returned when a resource value is missing.
+var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
// fromEnv is a Detector that implements the Detector and collects
// resources from environment. This Detector is included as a
@@ -91,7 +89,7 @@ func constructOTResources(s string) (*Resource, error) {
continue
}
key := strings.TrimSpace(k)
- val, err := url.QueryUnescape(strings.TrimSpace(v))
+ val, err := url.PathUnescape(strings.TrimSpace(v))
if err != nil {
// Retain original value if decoding fails, otherwise it will be
// an empty string.
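The switch from url.QueryUnescape to url.PathUnescape matters because OTEL_RESOURCE_ATTRIBUTES values may legitimately contain '+', which QueryUnescape would turn into a space. A small stdlib-only illustration:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := "1.2.3+build.4%20beta"
	q, _ := url.QueryUnescape(v) // '+' becomes a space
	p, _ := url.PathUnescape(v)  // '+' is preserved
	fmt.Println(q)               // 1.2.3 build.4 beta
	fmt.Println(p)               // 1.2.3+build.4 beta
}
```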
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 84e1c585605..0cbd559739c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -36,8 +36,10 @@ func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
osDescription = osDescriptionProvider
}
-type osTypeDetector struct{}
-type osDescriptionDetector struct{}
+type (
+ osTypeDetector struct{}
+ osDescriptionDetector struct{}
+)
// Detect returns a *Resource that describes the operating system type the
// service is running on.
@@ -56,7 +58,6 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
// service is running on.
func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
description, err := osDescription()
-
if err != nil {
return nil, err
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
index e67ff29e26d..ecdd11dd762 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -25,14 +25,16 @@ import (
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
)
-type pidProvider func() int
-type executablePathProvider func() (string, error)
-type commandArgsProvider func() []string
-type ownerProvider func() (*user.User, error)
-type runtimeNameProvider func() string
-type runtimeVersionProvider func() string
-type runtimeOSProvider func() string
-type runtimeArchProvider func() string
+type (
+ pidProvider func() int
+ executablePathProvider func() (string, error)
+ commandArgsProvider func() []string
+ ownerProvider func() (*user.User, error)
+ runtimeNameProvider func() string
+ runtimeVersionProvider func() string
+ runtimeOSProvider func() string
+ runtimeArchProvider func() string
+)
var (
defaultPidProvider pidProvider = os.Getpid
@@ -108,14 +110,16 @@ func setUserProviders(ownerProvider ownerProvider) {
owner = ownerProvider
}
-type processPIDDetector struct{}
-type processExecutableNameDetector struct{}
-type processExecutablePathDetector struct{}
-type processCommandArgsDetector struct{}
-type processOwnerDetector struct{}
-type processRuntimeNameDetector struct{}
-type processRuntimeVersionDetector struct{}
-type processRuntimeDescriptionDetector struct{}
+type (
+ processPIDDetector struct{}
+ processExecutableNameDetector struct{}
+ processExecutablePathDetector struct{}
+ processCommandArgsDetector struct{}
+ processOwnerDetector struct{}
+ processRuntimeNameDetector struct{}
+ processRuntimeVersionDetector struct{}
+ processRuntimeDescriptionDetector struct{}
+)
// Detect returns a *Resource that describes the process identifier (PID) of the
// executing process.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
index 0a018c14ded..7d46c4b48e5 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -25,6 +25,8 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+ "go.opentelemetry.io/otel/trace/noop"
)
const (
@@ -73,6 +75,8 @@ func (cfg tracerProviderConfig) MarshalLog() interface{} {
// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
// instrumentation so it can trace operational flow through a system.
type TracerProvider struct {
+ embedded.TracerProvider
+
mu sync.Mutex
namedTracer map[instrumentation.Scope]*tracer
spanProcessors atomic.Pointer[spanProcessorStates]
@@ -139,7 +143,7 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
// This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown().
if p.isShutdown.Load() {
- return trace.NewNoopTracerProvider().Tracer(name, opts...)
+ return noop.NewTracerProvider().Tracer(name, opts...)
}
c := trace.NewTracerConfig(opts...)
if name == "" {
@@ -157,7 +161,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
// Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran
// after the first check above but before we acquired the mutex.
if p.isShutdown.Load() {
- return trace.NewNoopTracerProvider().Tracer(name, opts...), true
+ return noop.NewTracerProvider().Tracer(name, opts...), true
}
t, ok := p.namedTracer[is]
if !ok {
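After shutdown the provider now hands out tracers from the standalone go.opentelemetry.io/otel/trace/noop package rather than the older trace.NewNoopTracerProvider helper. A small sketch of what callers get back in that case:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// A noop tracer accepts the full trace API but produces no spans.
	tracer := noop.NewTracerProvider().Tracer("example")
	_, span := tracer.Start(context.Background(), "ignored")
	defer span.End()
	fmt.Println(span.SpanContext().IsValid()) // false: nothing is recorded
}
```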
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
index 5ee9715d27b..a7bc125b9e8 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -158,9 +158,9 @@ func NeverSample() Sampler {
return alwaysOffSampler{}
}
-// ParentBased returns a composite sampler which behaves differently,
+// ParentBased returns a sampler decorator which behaves differently,
// based on the parent of the span. If the span has no parent,
-// the root(Sampler) is used to make sampling decision. If the span has
+// the decorated sampler is used to make the sampling decision. If the span has
// a parent, depending on whether the parent is remote and whether it
// is sampled, one of the following samplers will apply:
// - remoteParentSampled(Sampler) (default: AlwaysOn)
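The reworded doc comment describes the usual way ParentBased is used: wrap a root sampler and optionally override the four parent cases. A brief usage sketch against the SDK API:

```go
package main

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Sample 10% of root spans; spans with a parent follow the parent's
	// decision unless one of the option samplers overrides it.
	sampler := sdktrace.ParentBased(
		sdktrace.TraceIDRatioBased(0.10),
		sdktrace.WithRemoteParentNotSampled(sdktrace.NeverSample()),
	)
	_ = sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
}
```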
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index 37cdd4a694a..36dbf67764b 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -32,6 +32,7 @@ import (
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
"go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
)
// ReadOnlySpan allows reading information from the data structure underlying a
@@ -108,6 +109,8 @@ type ReadWriteSpan interface {
// recordingSpan is an implementation of the OpenTelemetry Span API
// representing the individual component of a trace that is sampled.
type recordingSpan struct {
+ embedded.Span
+
// mu protects the contents of this span.
mu sync.Mutex
@@ -158,8 +161,10 @@ type recordingSpan struct {
tracer *tracer
}
-var _ ReadWriteSpan = (*recordingSpan)(nil)
-var _ runtimeTracer = (*recordingSpan)(nil)
+var (
+ _ ReadWriteSpan = (*recordingSpan)(nil)
+ _ runtimeTracer = (*recordingSpan)(nil)
+)
// SpanContext returns the SpanContext of this span.
func (s *recordingSpan) SpanContext() trace.SpanContext {
@@ -772,6 +777,8 @@ func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
// that wraps a SpanContext. It performs no operations other than to return
// the wrapped SpanContext or TracerProvider that created it.
type nonRecordingSpan struct {
+ embedded.Span
+
// tracer is the SDK tracer that created this span.
tracer *tracer
sc trace.SpanContext
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
index 85a71227f3f..301e1a7abcc 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -20,9 +20,12 @@ import (
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
)
type tracer struct {
+ embedded.Tracer
+
provider *TracerProvider
instrumentationScope instrumentation.Scope
}
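The embedded.TracerProvider/Tracer/Span fields added throughout the SDK follow the forward-compatibility pattern the trace API expects of implementations. A hypothetical wrapper showing the same pattern (wrappedTracer is illustrative, not part of the SDK):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
)

// wrappedTracer embeds embedded.Tracer so it keeps satisfying trace.Tracer
// even as methods are added to the API, then delegates to an inner tracer.
type wrappedTracer struct {
	embedded.Tracer

	inner trace.Tracer
}

func (t wrappedTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
	return t.inner.Start(ctx, "wrapped/"+name, opts...)
}

// Compile-time check that the embedding satisfies the API interface.
var _ trace.Tracer = wrappedTracer{}

func main() {}
```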
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index 72d2cb09f7b..422d4c964b3 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -16,5 +16,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
- return "1.19.0"
+ return "1.21.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
new file mode 100644
index 00000000000..71a1f7748d5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.17.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
new file mode 100644
index 00000000000..679c40c4de4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` maybe be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or
+ // sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received
+ // messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received
+// messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
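These feature-flag and messaging attributes are typically attached to span events. A short usage sketch against the vendored v1.17.0 helpers (the "feature_flag" event name follows the convention this file documents):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "evaluate-flag")
	defer span.End()

	// Record a feature-flag evaluation as a span event.
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))
}
```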
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
new file mode 100644
index 00000000000..9b8c559de42
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
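ExceptionEventName is the event name the SDK uses when an error is recorded on a span; a minimal sketch of how such an event is usually produced:

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "work")
	defer span.End()

	// RecordError emits a span event named "exception" (ExceptionEventName),
	// optionally carrying a stack trace.
	span.RecordError(errors.New("boom"), trace.WithStackTrace(true))
}
```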
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
new file mode 100644
index 00000000000..d5c4b5c136a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
new file mode 100644
index 00000000000..39a2eab3a6a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
@@ -0,0 +1,2010 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserUserAgentKey is the attribute Key conforming to the
+ // "browser.user_agent" semantic conventions. It represents the full
+ // user-agent string provided by the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)
+ // AppleWebKit/537.36 (KHTML, '
+ // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+ // Note: The user-agent value SHOULD be provided only from browsers that do
+ // not have a mechanism to retrieve brands and platform individually from
+ // the User-Agent Client Hints API. To retrieve the value, the legacy
+ // `navigator.userAgent` API can be used.
+ BrowserUserAgentKey = attribute.Key("browser.user_agent")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserUserAgent returns an attribute KeyValue conforming to the
+// "browser.user_agent" semantic conventions. It represents the full user-agent
+// string provided by the browser
+func BrowserUserAgent(val string) attribute.KeyValue {
+ return BrowserUserAgentKey.String(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://intl.cloud.tencent.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
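The cloud.* keys above are normally attached to the SDK Resource describing the running service. A hedged sketch (semconv.SchemaURL comes from the package's schema.go, which is not shown in this diff):

```go
package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func main() {
	// Describe where the service runs using the cloud.* conventions.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEKS,
		semconv.CloudRegion("us-west-2"),
		semconv.CloudAvailabilityZone("us-west-2a"),
	)
	_ = res
}
```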
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each write to their own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
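+
+// Note: the following helper is an illustrative sketch added for
+// documentation purposes and is not part of the generated conventions. It
+// shows how the device attribute constructors above compose into a
+// []attribute.KeyValue suitable for a Resource; all values are the
+// hypothetical examples from the attribute documentation.
+func exampleDeviceAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
+  DeviceModelIdentifier("SM-G920F"),
+  DeviceModelName("Samsung Galaxy S6"),
+  DeviceManufacturer("Samsung"),
+ }
+}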
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `/`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `faas.id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
+ // conventions. It represents the unique ID of the single function that
+ // this runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so consider setting `faas.id` as a span attribute instead.
+ //
+ // The exact value to use for `faas.id` depends on the cloud provider:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ FaaSIDKey = attribute.Key("faas.id")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, which will potentially be reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function in MiB.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 128
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic
+// conventions. It represents the unique ID of the single function that this
+// runtime instance executes.
+func FaaSID(val string) attribute.KeyValue {
+ return FaaSIDKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function in MiB.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
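+
+// Note: illustrative sketch only, not generated from the specification. It
+// demonstrates combining the FaaS resource attributes above, using the
+// hypothetical example values from their documentation.
+func exampleFaaSAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  FaaSName("my-function"),
+  FaaSID("arn:aws:lambda:us-west-2:123456789012:function:my-function"),
+  FaaSVersion("26"),
+  FaaSMaxMemory(128),
+ }
+}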
+
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // Linux systems, the `machine-id` located in `/etc/machine-id` or
+ // `/var/lib/dbus/machine-id` may be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized Linux
+// systems, the `machine-id` located in `/etc/machine-id` or
+// `/var/lib/dbus/machine-id` may be used.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
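+
+// Note: illustrative sketch only, not generated from the specification. It
+// shows that enum-typed attributes such as "host.arch" are exposed as
+// predefined KeyValue variables (e.g. HostArchAMD64) rather than constructor
+// functions; the string values are the hypothetical examples documented above.
+func exampleHostAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  HostID("fdbf79e8af94cb7f9e8df36789187052"),
+  HostName("opentelemetry-test"),
+  HostArchAMD64,
+  HostImageVersion("0.1"),
+ }
+}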
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, e.g. as reported by the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, e.g. as reported by the
+// `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
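+
+// Note: illustrative sketch only, not generated from the specification. It
+// highlights that ProcessCommandArgs is variadic and produces a string-slice
+// attribute, in contrast to the scalar constructors; values are hypothetical.
+func exampleProcessAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ProcessPID(1234),
+  ProcessExecutableName("otelcol"),
+  ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+  ProcessOwner("root"),
+ }
+}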
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
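+
+// Note: illustrative sketch only, not generated from the specification. A
+// minimal service identity is usually built from these constructors and then
+// handed to the SDK's resource package (e.g. resource.NewWithAttributes with
+// SchemaURL from this package); the values here are hypothetical.
+func exampleServiceAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ServiceName("shoppingcart"),
+  ServiceNamespace("Shop"),
+  ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+  ServiceVersion("2.0.0"),
+ }
+}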
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OtelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OtelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OtelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OtelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OtelScopeName(val string) attribute.KeyValue {
+ return OtelScopeNameKey.String(val)
+}
+
+// OtelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OtelScopeVersion(val string) attribute.KeyValue {
+ return OtelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OtelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OtelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OtelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OtelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OtelLibraryName(val string) attribute.KeyValue {
+ return OtelLibraryNameKey.String(val)
+}
+
+// OtelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OtelLibraryVersion(val string) attribute.KeyValue {
+ return OtelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
new file mode 100644
index 00000000000..42fc525d165
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/
+const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
new file mode 100644
index 00000000000..8c4a7299d27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -0,0 +1,3375 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
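+
+// Note: illustrative sketch only, not generated from the specification. When
+// recording an exception event on a span, these constructors produce the
+// event attributes; the type and message are hypothetical example values.
+func exampleExceptionAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ExceptionType("OSError"),
+  ExceptionMessage("Division by zero"),
+ }
+}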
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
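+
+// Illustrative usage (not part of the generated specification): a
+// hypothetical browser click event could be described with the attributes
+// above, assuming an active span named span:
+//
+//	span.SetAttributes(EventDomainBrowser, EventName("click"))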
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` call, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `faas.id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` call, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
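+
+// Illustrative usage (not part of the generated specification): a Lambda
+// handler span could record the invoked ARN, assuming an active span named
+// span and the example ARN from the attribute documentation above:
+//
+//	span.SetAttributes(AWSLambdaInvokedARN(
+//		"arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"))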
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+ // that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+ // that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
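+
+// Illustrative usage (not part of the generated specification): a span
+// wrapping a CloudEvents delivery could carry the attributes above, assuming
+// an active span named span and values taken from the event envelope (the
+// literals below are the examples from the attribute documentation):
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("https://github.com/cloudevents"),
+//		CloudeventsEventSpecVersion("1.0"),
+//		CloudeventsEventType("com.example.object.deleted.v2"),
+//	)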
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database,
+ // this should be set to the target database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable and not
+ // explicitly disabled via instrumentation configuration.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ // Note: The value may be sanitized to exclude sensitive information.
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this
+// should be set to the target database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
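+
+// Illustrative usage (not part of the generated specification): a database
+// client span could combine the db.system enum with the call-level attributes
+// above, assuming a tracer obtained from an OpenTelemetry TracerProvider and
+// the trace package from go.opentelemetry.io/otel/trace; the literal values
+// are the examples from the attribute documentation:
+//
+//	ctx, span := tracer.Start(ctx, "SELECT wuser_table", trace.WithAttributes(
+//		DBSystemPostgreSQL,
+//		DBName("customers"),
+//		DBStatement("SELECT * FROM wuser_table"),
+//		DBOperation("SELECT"),
+//	))
+//	defer span.End()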
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // being connected to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
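+
+// Illustrative usage (not part of the generated specification): a Cassandra
+// query span could record the consistency-level enum together with the
+// call-level attributes above, assuming an active span named span and the
+// example values from the attribute documentation:
+//
+//	span.SetAttributes(
+//		DBCassandraConsistencyLevelLocalQuorum,
+//		DBCassandraTable("mytable"),
+//		DBCassandraPageSize(5000),
+//	)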
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
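+
+// Illustrative usage (not part of the generated specification): for a Redis
+// command against a non-default logical database, the index above is used
+// instead of the generic db.name attribute, assuming an active span named
+// span:
+//
+//	span.SetAttributes(DBSystemRedis, DBRedisDBIndex(15))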
+
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OtelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OtelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OtelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OtelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OtelStatusCodeOk = OtelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OtelStatusCodeError = OtelStatusCodeKey.String("ERROR")
+)
+
+// OtelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OtelStatusDescription(val string) attribute.KeyValue {
+ return OtelStatusDescriptionKey.String(val)
+}
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that corresponding incoming would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSExecutionKey is the attribute Key conforming to the "faas.execution"
+ // semantic conventions. It represents the execution ID of the current
+ // function execution.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSExecution returns an attribute KeyValue conforming to the
+// "faas.execution" semantic conventions. It represents the execution ID of the
+// current function execution.
+func FaaSExecution(val string) attribute.KeyValue {
+ return FaaSExecutionKey.String(val)
+}
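+
+// Illustrative usage (not part of the generated specification): an incoming
+// FaaS span triggered by an HTTP request could record the trigger enum and
+// execution ID above, assuming an active span named span and the example ID
+// from the attribute documentation:
+//
+//	span.SetAttributes(
+//		FaaSTriggerHTTP,
+//		FaaSExecution("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//	)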
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 it is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 it corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// it is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
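+
+// Illustrative usage (not part of the generated specification): a client span
+// for an outgoing invocation could record the invoked function's identity
+// with the attributes above, assuming an active span named span and the
+// example values from the attribute documentation:
+//
+//	span.SetAttributes(
+//		FaaSInvokedName("my-function"),
+//		FaaSInvokedProviderAWS,
+//		FaaSInvokedRegion("eu-central-1"),
+//	)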
+
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the transport protocol used. See
+ // note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetAppProtocolNameKey is the attribute Key conforming to the
+ // "net.app.protocol.name" semantic conventions. It represents the
+ // application layer protocol used. The value SHOULD be normalized to
+ // lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetAppProtocolNameKey = attribute.Key("net.app.protocol.name")
+
+ // NetAppProtocolVersionKey is the attribute Key conforming to the
+ // "net.app.protocol.version" semantic conventions. It represents the
+ // version of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.app.protocol.version` refers to the version of the protocol
+ // used and might be different from the protocol client's version. If the
+ // HTTP client used has a version of `0.27.2`, but sends HTTP version
+ // `1.1`, this attribute should be set to `1.1`.
+ NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the protocol
+ // [address
+ // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+ // which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if
+ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the logical remote hostname, see
+ // note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+ // extra DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the logical remote port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the logical local hostname or
+ // similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the logical local port number,
+ // preferably the one that the peer used to connect
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the local
+ // socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the local
+ // socket port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.host.port` and if `net.sock.host.addr` is set.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetAppProtocolName returns an attribute KeyValue conforming to the
+// "net.app.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetAppProtocolName(val string) attribute.KeyValue {
+ return NetAppProtocolNameKey.String(val)
+}
+
+// NetAppProtocolVersion returns an attribute KeyValue conforming to the
+// "net.app.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetAppProtocolVersion(val string) attribute.KeyValue {
+ return NetAppProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+ return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+ return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+ return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+ return NetHostCarrierIccKey.String(val)
+}
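+
+// Illustrative usage (not part of the generated specification): a client span
+// could describe its connection with a subset of the network attributes
+// above, assuming an active span named span and the example values from the
+// attribute documentation:
+//
+//	span.SetAttributes(
+//		NetTransportTCP,
+//		NetPeerName("example.com"),
+//		NetPeerPort(443),
+//	)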
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](../../resource/semantic_conventions/README.md#service)
+ // of the remote service. SHOULD be equal to the actual `service.name`
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
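+// Illustrative use of the enduser.* helpers (a sketch; it assumes a trace.Span
+// named span and values extracted by the instrumented auth layer):
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message write:files"),
+//	)
+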
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
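+// Illustrative use of the thread.* helpers (a sketch; span is an assumed
+// trace.Span, and the values come from whatever runtime the instrumentation
+// targets):
+//
+//	span.SetAttributes(
+//		ThreadID(42),
+//		ThreadName("main"),
+//	)
+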
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
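+// Illustrative use of the code.* helpers (a sketch; span is an assumed
+// trace.Span and the values mirror the examples above):
+//
+//	span.SetAttributes(
+//		CodeFunction("serveRequest"),
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeLineNumber(42),
+//	)
+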
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+ // semantic conventions. It represents the kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be
+ // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+ // assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+
+ // HTTPUserAgentKey is the attribute Key conforming to the
+ // "http.user_agent" semantic conventions. It represents the value of the
+ // [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+ // HTTP/1.0
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func HTTPUserAgent(val string) attribute.KeyValue {
+ return HTTPUserAgentKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other reason).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
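+// Illustrative attributes for an HTTP client span (a sketch; span is an
+// assumed trace.Span and the values mirror the examples above):
+//
+//	span.SetAttributes(
+//		HTTPMethod("GET"),
+//		HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+//		HTTPStatusCode(200),
+//	)
+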
+// Semantic Convention for HTTP Server
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // a HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: 'http.route' MUST NOT be populated when this is not supported by
+ // the HTTP server framework as the route attribute should have
+ // low-cardinality and the URI path can NOT substitute it.
+ HTTPRouteKey = attribute.Key("http.route")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in a
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
+
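+// Illustrative attributes for an HTTP server span (a sketch; span is an
+// assumed trace.Span and the route comes from the server framework, if it
+// exposes one):
+//
+//	span.SetAttributes(
+//		HTTPMethod("GET"),
+//		HTTPScheme("https"),
+//		HTTPTarget("/users/42?view=full"),
+//		HTTPRoute("/users/:userID?"),
+//		HTTPStatusCode(200),
+//	)
+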
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
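+// Illustrative attributes for a DynamoDB request span (a sketch; span is an
+// assumed trace.Span and the values are taken from the request parameters):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBProjection("Title, Price, Color"),
+//		AWSDynamoDBLimit(10),
+//	)
+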
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
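+// Illustrative attributes for a DynamoDB Scan span (a sketch; span is an
+// assumed trace.Span):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(10),
+//		AWSDynamoDBTotalSegments(100),
+//		AWSDynamoDBCount(10),
+//		AWSDynamoDBScannedCount(50),
+//	)
+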
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in
+ // the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
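+// Illustrative attributes for a GraphQL operation span (a sketch; span is an
+// assumed trace.Span):
+//
+//	span.SetAttributes(
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlOperationTypeQuery,
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)
+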
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic,
+ // or other entity within the broker. If the broker does not have such a
+ // notion, the destination name SHOULD uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationKindKey is the attribute Key conforming to the
+ // "messaging.destination.kind" semantic conventions. It represents the
+ // kind of message destination
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationKindKey = attribute.Key("messaging.destination.kind")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+var (
+ // A message sent to a queue
+ MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+ // A message sent to a topic
+ MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+ // MessagingSourceNameKey is the attribute Key conforming to the
+ // "messaging.source.name" semantic conventions. It represents the message
+ // source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If the broker does not have such a
+ // notion, the source name SHOULD uniquely identify the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+ // MessagingSourceKindKey is the attribute Key conforming to the
+ // "messaging.source.kind" semantic conventions. It represents the kind of
+ // message source
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceKindKey = attribute.Key("messaging.source.kind")
+
+ // MessagingSourceTemplateKey is the attribute Key conforming to the
+ // "messaging.source.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would
+ // be a source name involving a user name or product id. Although the
+ // source name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+var (
+ // A message received from a queue
+ MessagingSourceKindQueue = MessagingSourceKindKey.String("queue")
+ // A message received from a topic
+ MessagingSourceKindTopic = MessagingSourceKindKey.String("topic")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
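+// Illustrative attributes for a messaging publish span (a sketch; span is an
+// assumed trace.Span):
+//
+//	span.SetAttributes(
+//		MessagingSystem("kafka"),
+//		MessagingOperationPublish,
+//		MessagingDestinationName("MyTopic"),
+//		MessagingBatchMessageCount(2),
+//	)
+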
+// Semantic convention for a consumer of messages received from a messaging
+// system
+const (
+ // MessagingConsumerIDKey is the attribute Key conforming to the
+ // "messaging.consumer.id" semantic conventions. It represents the
+ // identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+ // both are present, or only `messaging.kafka.consumer.group`. For brokers,
+ // such as RabbitMQ and Artemis, set it to the `client_id` of the client
+ // consuming the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
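
Under the same caveats, a consumer-side sketch showing how the Kafka-specific helpers above might be recorded on an existing span; the recordKafkaReceive function and its parameters are hypothetical, not part of this package.

package example

import (
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumption: match the vendored version in use
    "go.opentelemetry.io/otel/trace"
)

// recordKafkaReceive annotates a receive span with the Kafka consumer
// attributes defined above. The offset is narrowed to int because the
// generated helper takes an int.
func recordKafkaReceive(span trace.Span, group, clientID string, partition int, offset int64) {
    span.SetAttributes(
        semconv.MessagingKafkaConsumerGroup(group),
        semconv.MessagingKafkaClientID(clientID),
        semconv.MessagingKafkaSourcePartition(partition),
        semconv.MessagingKafkaMessageOffset(int(offset)),
    )
}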
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the it is essential for FIFO message. Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark the message besides the
+ // message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the it is essential for FIFO message. Messages that belong to the same
+// message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called; it must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
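
For orientation, a small sketch of how an instrumentation might combine the rpc.* helpers with the gRPC status-code enum above; the recordGRPCCall helper and the way the numeric code is obtained are assumptions, not part of this package.

package example

import (
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumption: match the vendored version in use
    "go.opentelemetry.io/otel/trace"
)

// recordGRPCCall records the rpc.* attributes for a finished gRPC call.
// code is the numeric gRPC status code (0 == OK).
func recordGRPCCall(span trace.Span, service, method string, code int) {
    span.SetAttributes(
        semconv.RPCSystemGRPC,
        semconv.RPCService(service),
        semconv.RPCMethod(method),
        // The enum vars above cover the well-known codes; for a value only
        // known at runtime the key can be used directly.
        semconv.RPCGRPCStatusCodeKey.Int(code),
    )
}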
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int,
+ // string, `null` or missing (for notifications), value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
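
And, again purely as an illustration, a sketch of recording the JSON-RPC attributes on an error response; the helper name and all parameter values are hypothetical.

package example

import (
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumption: match the vendored version in use
    "go.opentelemetry.io/otel/trace"
)

// recordJSONRPCError annotates a span describing a failed JSON-RPC call.
func recordJSONRPCError(span trace.Span, version, requestID string, code int, msg string) {
    span.SetAttributes(
        semconv.RPCJsonrpcVersion(version),
        semconv.RPCJsonrpcRequestID(requestID),
        semconv.RPCJsonrpcErrorCode(code),
        semconv.RPCJsonrpcErrorMessage(msg),
    )
}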
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
new file mode 100644
index 00000000000..67d1d4c44d7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -0,0 +1,1209 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes HTTP attributes.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTP Server spans attributes
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
+ // if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
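
A hedged sketch of how an HTTP server instrumentation might use the helpers from this file; the recordHTTPServerSpan function, the route parameter, and the scheme handling are placeholders rather than anything defined by this package.

package example

import (
    "net/http"

    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    "go.opentelemetry.io/otel/trace"
)

// recordHTTPServerSpan records the request method, scheme, matched route and
// response status on an already started server span.
func recordHTTPServerSpan(span trace.Span, r *http.Request, route string, status int) {
    scheme := "http"
    if r.TLS != nil {
        scheme = "https"
    }
    span.SetAttributes(
        semconv.HTTPMethod(r.Method),
        semconv.HTTPScheme(scheme),
        semconv.HTTPRoute(route), // only when the framework exposes a low-cardinality route
        semconv.HTTPStatusCode(status),
    )
}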
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the transport protocol used. See
+ // note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions. It represents the application
+ // layer protocol used. The value SHOULD be normalized to lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions. It represents the version
+ // of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.protocol.version` refers to the version of the protocol used
+ // and might be different from the protocol client's version. If the HTTP
+ // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
+ // this attribute should be set to `1.1`.
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the protocol
+ // [address
+ // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+ // which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if
+ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the logical remote hostname, see
+ // note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+ // extra DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the logical remote port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the logical local hostname or
+ // similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the logical local port number,
+ // preferably the one that the peer used to connect
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the local
+ // socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the local
+ // socket port number.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If defined for the address
+ // family and if different than `net.host.port` and if `net.sock.host.addr`
+ // is set. In other cases, it is still recommended to set this.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It represents the version of
+// the application layer protocol used. See note below.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
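
For completeness, a minimal sketch of attaching the logical peer attributes above to a client span; recordPeer is a hypothetical helper, and the host and port are simply whatever the caller dialed.

package example

import (
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    "go.opentelemetry.io/otel/trace"
)

// recordPeer records the logical remote endpoint and transport for a client span.
func recordPeer(span trace.Span, host string, port int) {
    span.SetAttributes(
        semconv.NetPeerName(host),
        semconv.NetPeerPort(port),
        semconv.NetTransportTCP, // enum member from the var block above
    )
}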
+
+// These attributes may be used for any network related operation.
+const (
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+ return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+ return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+ return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+ return NetHostCarrierIccKey.String(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
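
Purely as an illustration, the per-message helpers above might be used like this on a span or link; recordMessage is a hypothetical helper, not part of the generated API.

package example

import (
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    "go.opentelemetry.io/otel/trace"
)

// recordMessage records the message identifier and its uncompressed payload size.
func recordMessage(span trace.Span, id string, payload []byte) {
    span.SetAttributes(
        semconv.MessagingMessageID(id),
        semconv.MessagingMessagePayloadSizeBytes(len(payload)),
    )
}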
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker does not have such notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+ // MessagingSourceNameKey is the attribute Key conforming to the
+ // "messaging.source.name" semantic conventions. It represents the message
+ // source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker does not have such notion, the source name SHOULD uniquely
+ // identify the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+ // MessagingSourceTemplateKey is the attribute Key conforming to the
+ // "messaging.source.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would
+ // be a source name involving a user name or product id. Although the
+ // source name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
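As a hedged illustration (not part of the vendored file), a consumer span might carry the messaging.source.* helpers defined above; the package name, tracer name and queue value are invented, and the import path assumes these helpers live in the semconv v1.20.0 package added by this diff:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// receiveSpan starts a consumer-side span and tags it with the
// messaging.source.* attributes generated above. The queue name and flags
// are illustrative placeholders.
func receiveSpan(ctx context.Context, queue string) (context.Context, trace.Span) {
	return otel.Tracer("example/consumer").Start(ctx, queue+" receive",
		trace.WithSpanKind(trace.SpanKindConsumer),
		trace.WithAttributes(
			semconv.MessagingSourceName(queue),
			semconv.MessagingSourceTemporary(false),
			semconv.MessagingSourceAnonymous(false),
		),
	)
}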
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message key in Kafka, used for grouping like messages to ensure they're
+// processed on the same partition. It differs from `messaging.message.id` in
+// that it's not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
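A minimal sketch, under the same v1.20.0 import assumption, of collecting the Kafka attributes above for a polled record; the function name and all values are caller-supplied placeholders, not read from a real Kafka client:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// kafkaConsumeAttrs collects the Kafka-specific attributes defined above for
// a record that was just received.
func kafkaConsumeAttrs(key, group, clientID string, partition, offset int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.MessagingKafkaConsumerGroup(group),
		semconv.MessagingKafkaClientID(clientID),
		semconv.MessagingKafkaSourcePartition(partition),
		semconv.MessagingKafkaMessageOffset(offset),
	}
	if key != "" { // the convention says the attribute MUST NOT be set for null keys
		attrs = append(attrs, semconv.MessagingKafkaMessageKey(key))
	}
	return attrs
}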
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within
+// the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
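A hedged sketch of the RocketMQ helpers above for a FIFO message; the helper name is invented and the values are placeholders:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// rocketmqFIFOAttrs combines the required RocketMQ attributes with the
// conditionally required message group for a FIFO message.
func rocketmqFIFOAttrs(namespace, clientGroup, clientID, msgGroup string) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingRocketmqNamespace(namespace),
		semconv.MessagingRocketmqClientGroup(clientGroup),
		semconv.MessagingRocketmqClientID(clientID),
		semconv.MessagingRocketmqMessageTypeFifo, // enum member, already a KeyValue
		semconv.MessagingRocketmqMessageGroup(msgGroup),
	}
}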
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
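Illustrative only: copying the request's User-Agent header onto the current server span with the helper above. The package and function names are invented, and the import path again assumes the v1.20.0 package added in this diff:

package example

import (
	"net/http"

	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// recordUserAgent copies the client's User-Agent header onto the current
// server span using the generated helper.
func recordUserAgent(r *http.Request) {
	span := trace.SpanFromContext(r.Context())
	if ua := r.UserAgent(); ua != "" {
		span.SetAttributes(semconv.UserAgentOriginal(ua))
	}
}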
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
new file mode 100644
index 00000000000..359c5a69624
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.20.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
new file mode 100644
index 00000000000..8ac9350d2b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
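A hedged sketch of setting exception.escaped when recording an error just before a span ends, which is the situation the note above describes; the function is invented for illustration and the import path assumes the v1.20.0 package added here:

package example

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// endWithError records err immediately before ending the span; since the
// error is still "in flight" at that point, exception.escaped is set to true.
func endWithError(ctx context.Context, err error) {
	span := trace.SpanFromContext(ctx)
	if err != nil {
		span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
	}
	span.End()
}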
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
new file mode 100644
index 00000000000..09ff4dfdbf7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
new file mode 100644
index 00000000000..342aede95f1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
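Illustrative use of the pre-built scheme values above; HTTPSchemeKey itself is defined elsewhere in the same package, and the helper shown here is an invented example:

package example

import (
	"net/http"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// schemeAttr picks the pre-built scheme KeyValue for an incoming request.
func schemeAttr(r *http.Request) attribute.KeyValue {
	if r.TLS != nil {
		return semconv.HTTPSchemeHTTPS
	}
	return semconv.HTTPSchemeHTTP
}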
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
new file mode 100644
index 00000000000..a2b906742a8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
@@ -0,0 +1,2071 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
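A hedged sketch of wiring the browser.* helpers above into an SDK Resource; the values are placeholders, the function is invented, and semconv.SchemaURL is assumed to be the schema constant shipped in this package's schema.go:

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// browserResource builds a Resource carrying the browser.* attributes above.
// Real instrumentations would read these values from the UA client hints and
// Navigator APIs mentioned in the notes.
func browserResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // assumed schema URL constant from the same package
		semconv.BrowserBrands("Chromium 99", "Chrome 99"),
		semconv.BrowserPlatform("macOS"),
		semconv.BrowserMobile(false),
		semconv.BrowserLanguage("en-US"),
	)
}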
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
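Under the same assumptions, a sketch of assembling cloud.* attributes for a workload the caller already knows runs on AWS ECS; account, region and zone are illustrative inputs, not detected values:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// awsCloudAttrs combines the cloud provider/platform enum members with the
// string-valued helpers defined above.
func awsCloudAttrs(account, region, zone string) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSECS,
		semconv.CloudAccountID(account),
		semconv.CloudRegion(region),
		semconv.CloudAvailabilityZone(zone),
	}
}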
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
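A short sketch of the variadic slice-valued log helpers above; the function is invented, and a single group and stream are passed, though several could be listed for sidecar setups:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// lambdaLogAttrs records which CloudWatch log group and stream the
// application writes to, using the slice-valued helpers defined above.
func lambdaLogAttrs(group, stream string) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSLogGroupNames(group),
		semconv.AWSLogStreamNames(stream),
	}
}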
+// Heroku dyno metadata
+const (
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+)
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
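+
+// Illustrative usage sketch (not part of the generated conventions): the
+// container helpers above produce attribute.KeyValue pairs that a consuming
+// application can attach to its resource; the example values below are
+// assumptions.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.ContainerName("opentelemetry-autoconf"),
+//		semconv.ContainerRuntime("containerd"),
+//		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
+//		semconv.ContainerImageTag("0.1"),
+//	}
+//	_ = attrs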
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+	// * **Azure:** The full name, i.e., the function app name followed by a
+	// forward slash followed by the function name (this form can also be
+	// seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
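+
+// Minimal sketch of the conversion described in the faas.max_memory note
+// above; assumes an AWS Lambda environment, where the variable holds the
+// configured memory in megabytes.
+//
+//	if mb, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
+//		attr := semconv.FaaSMaxMemory(mb * 1_048_576) // megabytes to bytes
+//		_ = attr
+//	}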
+
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
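+
+// Minimal sketch for populating host.id on a non-containerized Linux host,
+// assuming /etc/machine-id is the appropriate source for that platform:
+//
+//	if b, err := os.ReadFile("/etc/machine-id"); err == nil {
+//		attr := semconv.HostID(strings.TrimSpace(string(b)))
+//		_ = attr
+//	}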
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
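+
+// Sketch of one common way to populate the namespace and pod attributes
+// above: expose them to the container through the Kubernetes downward API
+// and read the resulting environment variables. The variable names here are
+// assumptions chosen for the example.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
+//		semconv.K8SPodName(os.Getenv("POD_NAME")),
+//		semconv.K8SPodUID(os.Getenv("POD_UID")),
+//	}
+//	_ = attrs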
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from Pod specification, must be unique within a Pod. Container
+ // runtime usually uses different globally unique name (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, like e.g. reported by `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, like e.g. reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
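+
+// Sketch of mapping Go's runtime.GOOS onto the os.type enum values above;
+// only a few of the possible GOOS values are shown.
+//
+//	var osType attribute.KeyValue
+//	switch runtime.GOOS {
+//	case "windows":
+//		osType = semconv.OSTypeWindows
+//	case "darwin":
+//		osType = semconv.OSTypeDarwin
+//	case "linux":
+//		osType = semconv.OSTypeLinux
+//	}
+//	_ = osType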
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
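+
+// Sketch of filling the process attributes above from the standard library;
+// error handling is elided and the os/user lookup can fail on some
+// platforms.
+//
+//	exe, _ := os.Executable()
+//	usr, _ := user.Current()
+//	attrs := []attribute.KeyValue{
+//		semconv.ProcessPID(os.Getpid()),
+//		semconv.ProcessParentPID(os.Getppid()),
+//		semconv.ProcessExecutablePath(exe),
+//		semconv.ProcessExecutableName(filepath.Base(exe)),
+//		semconv.ProcessCommandArgs(os.Args...),
+//		semconv.ProcessOwner(usr.Username),
+//	}
+//	_ = attrs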
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
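+
+// Sketch of the fallback rule from the service.name note above; cfg and
+// exeName are hypothetical values supplied by the caller.
+//
+//	name := cfg.ServiceName
+//	if name == "" {
+//		if exeName != "" {
+//			name = "unknown_service:" + exeName
+//		} else {
+//			name = "unknown_service"
+//		}
+//	}
+//	attr := semconv.ServiceName(name)
+//	_ = attr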
+
+// A service instance.
+const (
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
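+
+// Sketch for service.instance.id when the service has no inherent unique ID,
+// following the note above by generating a random RFC 4122 version 4 UUID;
+// assumes the github.com/google/uuid package.
+//
+//	attr := semconv.ServiceInstanceID(uuid.NewString())
+//	_ = attr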
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
new file mode 100644
index 00000000000..e449e5c3b9f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
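+
+// Illustrative sketch: SchemaURL is typically passed as the first argument
+// of the SDK resource constructors so the produced resource is associated
+// with this package's schema (the sdk/resource import and the service name
+// are assumptions).
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ServiceName("shoppingcart"),
+//	)
+//	_ = res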
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
new file mode 100644
index 00000000000..8517741485c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -0,0 +1,2610 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
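+
+// Illustrative usage (editor's sketch, not generated): these attributes are
+// conventionally attached to a span event named "exception". Assuming a
+// trace.Span `span` from go.opentelemetry.io/otel/trace and a stack trace
+// string `stack`:
+//
+//    span.AddEvent("exception", trace.WithAttributes(
+//        ExceptionType("OSError"),
+//        ExceptionMessage("Division by zero"),
+//        ExceptionStacktrace(stack),
+//    ))
+//
+// In many cases span.RecordError(err) records such an event for you.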
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be an [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` response, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
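+
+// Illustrative usage (editor's sketch, not generated): attaching CloudEvents
+// attributes to the span that processes an event, assuming a trace.Span `span`:
+//
+//    span.SetAttributes(
+//        CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//        CloudeventsEventSource("https://github.com/cloudevents"),
+//        CloudeventsEventSpecVersion("1.0"),
+//        CloudeventsEventType("com.example.object.deleted.v2"),
+//    )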
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
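+
+// Illustrative usage (editor's sketch, not generated): a database client span
+// carrying the core db.* attributes, assuming a trace.Tracer `tracer` and the
+// go.opentelemetry.io/otel/trace package:
+//
+//    ctx, span := tracer.Start(ctx, "SELECT wuser_table",
+//        trace.WithSpanKind(trace.SpanKindClient),
+//        trace.WithAttributes(
+//            DBSystemPostgreSQL,
+//            DBName("customers"),
+//            DBOperation("SELECT"),
+//            DBStatement("SELECT * FROM wuser_table"),
+//            DBUser("readonly_user"),
+//        ))
+//    defer span.End()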
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
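+
+// Illustrative usage (editor's sketch, not generated): for Redis the database
+// index replaces the generic `db.name`, assuming a trace.Span `span`:
+//
+//    span.SetAttributes(
+//        DBSystemRedis,
+//        DBRedisDBIndex(15),
+//        DBStatement(`SET mykey "WuValue"`),
+//    )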
+
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// Call-level attributes for Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // cosmosDB Operation Type.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (when performing one of the
+ // operations in this list)
+ // Stability: stable
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
+ // default))
+ // Stability: stable
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if available)
+ // Stability: stable
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (if response was received)
+ // Stability: stable
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (when response was received and
+ // contained sub-code.)
+ // Stability: stable
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
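+
+// Illustrative usage (editor's sketch, not generated): Cosmos DB call-level
+// attributes on a client span, assuming a trace.Span `span`:
+//
+//    span.SetAttributes(
+//        DBSystemCosmosDB,
+//        DBCosmosDBOperationTypeQuery,
+//        DBCosmosDBConnectionModeGateway,
+//        DBCosmosDBContainer("customers"),
+//        DBCosmosDBStatusCode(200),
+//        DBCosmosDBRequestCharge(46.18),
+//    )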
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
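+
+// Note (editor's sketch, not generated): instrumentation code does not usually
+// set these attributes directly; it sets the span status, which non-OTLP
+// exporters may then translate into `otel.status_code` and
+// `otel.status_description`. Assuming go.opentelemetry.io/otel/codes and a
+// trace.Span `span`:
+//
+//    span.SetStatus(codes.Error, "resource not found")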
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that corresponding incoming would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
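+
+// Illustrative usage (editor's sketch, not generated): an incoming,
+// HTTP-triggered FaaS invocation span, assuming a trace.Tracer `tracer`:
+//
+//    ctx, span := tracer.Start(ctx, "my-function",
+//        trace.WithSpanKind(trace.SpanKindServer),
+//        trace.WithAttributes(
+//            FaaSTriggerHTTP,
+//            FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//        ))
+//    defer span.End()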
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
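+
+// Illustrative usage (editor's sketch, not generated): outgoing FaaS
+// invocation attributes on a client span, assuming a trace.Span `span`:
+//
+//    span.SetAttributes(
+//        FaaSInvokedName("my-function"),
+//        FaaSInvokedProviderAWS,
+//        FaaSInvokedRegion("eu-central-1"),
+//    )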
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](../../resource/semantic_conventions/README.md#service)
+ // of the remote service. SHOULD be equal to the actual `service.name`
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
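+
+// Illustrative usage (editor's sketch, not generated): recording the
+// authenticated end user on a server span, assuming a trace.Span `span`:
+//
+//    span.SetAttributes(
+//        EnduserID("username"),
+//        EnduserRole("admin"),
+//        EnduserScope("read:message, write:files"),
+//    )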
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
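+
+// Illustrative sketch only, not part of the generated conventions: these
+// helpers are typically attached to a span obtained from a Tracer (the span
+// variable below is assumed), for example:
+//
+//    span.SetAttributes(
+//        CodeNamespace("com.example.MyHTTPService"),
+//        CodeFunction("serveRequest"),
+//        CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//        CodeLineNumber(42),
+//    )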
+
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
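+
+// Illustrative sketch only, not part of the generated conventions: an HTTP
+// client span (the span variable below is assumed) might record these as:
+//
+//    span.SetAttributes(
+//        HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+//        HTTPResendCount(2),
+//    )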
+
+// Semantic Convention for HTTP Server
+const (
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+	// an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/users/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+	// Note: This is not necessarily the same as `net.sock.peer.addr`, which
+	// would identify the network-level peer, which may be a proxy.
+	//
+	// This attribute should be set when a source of information different
+	// from the one used for `net.sock.peer.addr` is available, even if that
+	// other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in an
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
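+
+// Illustrative sketch only, not part of the generated conventions: an HTTP
+// server span (the span variable below is assumed) might record these as:
+//
+//    span.SetAttributes(
+//        HTTPTarget("/users/12314/?q=ddds"),
+//        HTTPClientIP("83.164.160.102"),
+//    )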
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
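+
+// Illustrative sketch only, not part of the generated conventions: a span for
+// a DynamoDB Query (the span variable below is assumed) might combine these
+// as:
+//
+//    span.SetAttributes(
+//        AWSDynamoDBTableNames("Users"),
+//        AWSDynamoDBIndexName("name_to_group"),
+//        AWSDynamoDBConsistentRead(true),
+//        AWSDynamoDBLimit(10),
+//        AWSDynamoDBSelect("ALL_ATTRIBUTES"),
+//    )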
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in
+	// the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
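+
+// Illustrative sketch only, not part of the generated conventions: a span for
+// an S3 upload-part call (the span variable below is assumed) might be
+// annotated as:
+//
+//    span.SetAttributes(
+//        AWSS3Bucket("some-bucket-name"),
+//        AWSS3Key("someFile.yml"),
+//        AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//        AWSS3PartNumber(3456),
+//    )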
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
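+
+// Illustrative sketch only, not part of the generated conventions: a span for
+// a GraphQL query execution (the span variable below is assumed) might be
+// annotated as:
+//
+//    span.SetAttributes(
+//        GraphqlOperationTypeQuery,
+//        GraphqlOperationName("findBookByID"),
+//        GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//    )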
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
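+
+// Illustrative sketch only, not part of the generated conventions: a span
+// processing a batch of messages (the span variable below is assumed) might
+// be annotated as:
+//
+//    span.SetAttributes(
+//        MessagingSystem("kafka"),
+//        MessagingOperationProcess,
+//        MessagingBatchMessageCount(2),
+//    )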
+
+// Semantic convention for a consumer of messages received from a messaging
+// system
+const (
+ // MessagingConsumerIDKey is the attribute Key conforming to the
+ // "messaging.consumer.id" semantic conventions. It represents the
+ // identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+ // both are present, or only `messaging.kafka.consumer.group`. For brokers,
+ // such as RabbitMQ and Artemis, set it to the `client_id` of the client
+ // consuming the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
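+
+// Illustrative sketch only, not part of the generated conventions: a gRPC
+// client or server span (the span variable below is assumed) might be
+// annotated as:
+//
+//    span.SetAttributes(
+//        RPCSystemGRPC,
+//        RPCService("myservice.EchoService"),
+//        RPCMethod("exampleMethod"),
+//    )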
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int,
+	// property of request or response. Since the protocol allows the id to be
+	// int, string, `null`, or missing (for notifications), the value is
+	// expected to be cast to string for simplicity. Use an empty string in
+	// case of a `null` value.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows the id to be
+// int, string, `null`, or missing (for notifications), the value is expected
+// to be cast to string for simplicity. Use an empty string in case of a
+// `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
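+
+// Illustrative sketch only, not part of the generated conventions: a span for
+// a failed JSON-RPC call (the span variable below is assumed) might be
+// annotated as:
+//
+//    span.SetAttributes(
+//        RPCJsonrpcVersion("2.0"),
+//        RPCJsonrpcRequestID("10"),
+//        RPCJsonrpcErrorCode(-32700),
+//        RPCJsonrpcErrorMessage("Parse error"),
+//    )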
+
+// Tech-specific attributes for Connect RPC.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If response is not successful
+ // and if error code available.)
+ // Stability: stable
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
index 7cf424855e9..0318b5ec48f 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
@@ -15,6 +15,6 @@
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.21.0 version of the OpenTelemetry specification.
+// patterns for OpenTelemetry things. This package represents the v1.21.0
+// version of the OpenTelemetry semantic conventions.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index cb3efbb9ad8..3aadc66cf7a 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -268,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
c.stackTrace = bool(o)
return c
}
+
func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
c.stackTrace = bool(o)
return c
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
index ab0346f9664..440f3d7565a 100644
--- a/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -62,5 +62,69 @@ a default.
defer span.End()
// ...
}
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+ - Compilation failure
+ - Panic
+ - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
+example,
+
+ import "go.opentelemetry.io/otel/trace/embedded"
+
+ type TracerProvider struct {
+ embedded.TracerProvider
+ // ...
+ }
+
+If an author wants the default behavior of their implementations to panic, they
+can embed the API interface directly.
+
+ import "go.opentelemetry.io/otel/trace"
+
+ type TracerProvider struct {
+ trace.TracerProvider
+ // ...
+ }
+
+This option is not recommended. It will lead to publishing packages that
+contain runtime panics when users update to newer versions of
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
+dependency.
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/trace/noop]:
+
+ import "go.opentelemetry.io/otel/trace/noop"
+
+ type TracerProvider struct {
+ noop.TracerProvider
+ // ...
+ }
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
*/
package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
new file mode 100644
index 00000000000..898db5a7546
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// trace API].
+//
+// Implementers of the [OpenTelemetry trace API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry trace API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
+package embedded // import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider is embedded in
+// [go.opentelemetry.io/otel/trace.TracerProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type TracerProvider interface{ tracerProvider() }
+
+// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Tracer interface{ tracer() }
+
+// Span is embedded in [go.opentelemetry.io/otel/trace.Span].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Span interface{ span() }
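
The unexported methods on these interfaces are what make the compile-time guard work: an implementation cannot satisfy the trace API interfaces without either embedding the corresponding embedded type or embedding another implementation. A minimal sketch of the compilation-failure option described in the package documentation above (not part of this diff; the package and type names are hypothetical):

    package tracerimpl
    
    import (
    	"context"
    
    	"go.opentelemetry.io/otel/trace"
    	"go.opentelemetry.io/otel/trace/embedded"
    )
    
    // myTracer embeds embedded.Tracer, so any method added to trace.Tracer in a
    // future minor release that myTracer does not implement surfaces as a
    // compilation error at the assertion below rather than a runtime panic.
    type myTracer struct {
    	embedded.Tracer
    }
    
    // Start returns the span already stored in ctx; a real tracer would create
    // and record its own span here.
    func (myTracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
    	return ctx, trace.SpanFromContext(ctx)
    }
    
    // Compile-time guard: fails to build if trace.Tracer gains methods myTracer
    // does not define.
    var _ trace.Tracer = myTracer{}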
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index 7cf6c7f3ef9..c125491caeb 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -19,16 +19,20 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
)
// NewNoopTracerProvider returns an implementation of TracerProvider that
// performs no operations. The Tracer and Spans created from the returned
// TracerProvider also perform no operations.
+//
+// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
+// instead.
func NewNoopTracerProvider() TracerProvider {
return noopTracerProvider{}
}
-type noopTracerProvider struct{}
+type noopTracerProvider struct{ embedded.TracerProvider }
var _ TracerProvider = noopTracerProvider{}
@@ -38,7 +42,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
}
// noopTracer is an implementation of Tracer that performs no operations.
-type noopTracer struct{}
+type noopTracer struct{ embedded.Tracer }
var _ Tracer = noopTracer{}
@@ -54,7 +58,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption
}
// noopSpan is an implementation of Span that performs no operations.
-type noopSpan struct{}
+type noopSpan struct{ embedded.Span }
var _ Span = noopSpan{}
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
new file mode 100644
index 00000000000..7f485543c47
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -0,0 +1,118 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package noop provides an implementation of the OpenTelemetry trace API that
+// produces no telemetry and minimizes the computational resources used.
+//
+// Using this package to implement the OpenTelemetry trace API will effectively
+// disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry trace API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ trace.TracerProvider = TracerProvider{}
+ _ trace.Tracer = Tracer{}
+ _ trace.Span = Span{}
+)
+
+// TracerProvider is an OpenTelemetry No-Op TracerProvider.
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+ return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+ return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+ span := trace.SpanFromContext(ctx)
+
+ // If the parent context contains a non-zero span context, that span
+ // context needs to be returned as a non-recording span
+ // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+ var zeroSC trace.SpanContext
+ if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+ if !span.IsRecording() {
+ // If the span is not recording return it directly.
+ return ctx, span
+ }
+ // Otherwise, wrap the span context in a non-recording span and return it.
+ span = Span{sc: sc}
+ } else {
+ // No parent, return a No-Op span with an empty span context.
+ span = Span{}
+ }
+ return trace.ContextWithSpan(ctx, span), span
+}
+
+// Span is an OpenTelemetry No-Op Span.
+type Span struct {
+ embedded.Span
+
+ sc trace.SpanContext
+}
+
+// SpanContext returns an empty span context.
+func (s Span) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (Span) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (Span) SetStatus(codes.Code, string) {}
+
+// SetAttributes does nothing.
+func (Span) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (Span) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (Span) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (Span) AddEvent(string, ...trace.EventOption) {}
+
+// SetName does nothing.
+func (Span) SetName(string) {}
+
+// TracerProvider returns a No-Op TracerProvider.
+func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
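
For reference, a small usage sketch (assumed, not part of this change) of the new noop package: installing it as the global provider disables tracing while leaving instrumented code untouched, which is the migration path for the now-deprecated trace.NewNoopTracerProvider.

    package main
    
    import (
    	"context"
    
    	"go.opentelemetry.io/otel"
    	"go.opentelemetry.io/otel/trace/noop"
    )
    
    func main() {
    	// Every Tracer and Span obtained from this provider is a no-op.
    	otel.SetTracerProvider(noop.NewTracerProvider())
    
    	tracer := otel.Tracer("example")
    	_, span := tracer.Start(context.Background(), "operation")
    	span.End() // records nothing
    }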
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index 4aa94f79f46..26a4b2260ec 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -22,6 +22,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
)
const (
@@ -48,8 +49,10 @@ func (e errorConst) Error() string {
// nolint:revive // revive complains about stutter of `trace.TraceID`.
type TraceID [16]byte
-var nilTraceID TraceID
-var _ json.Marshaler = nilTraceID
+var (
+ nilTraceID TraceID
+ _ json.Marshaler = nilTraceID
+)
// IsValid checks whether the trace TraceID is valid. A valid trace ID does
// not consist of zeros only.
@@ -71,8 +74,10 @@ func (t TraceID) String() string {
// SpanID is a unique identity of a span in a trace.
type SpanID [8]byte
-var nilSpanID SpanID
-var _ json.Marshaler = nilSpanID
+var (
+ nilSpanID SpanID
+ _ json.Marshaler = nilSpanID
+)
// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
// of zeros only.
@@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
// create a Span and it is then up to the operation the Span represents to
// properly end the Span when the operation itself ends.
//
-// Warning: methods may be added to this interface in minor releases.
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
type Span interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Span
+
// End completes the Span. The Span is considered complete and ready to be
// delivered through the rest of the telemetry pipeline after this method
// is called. Therefore, updates to the Span are not allowed after this
@@ -486,8 +498,15 @@ func (sk SpanKind) String() string {
// Tracer is the creator of Spans.
//
-// Warning: methods may be added to this interface in minor releases.
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
type Tracer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Tracer
+
// Start creates a span and a context.Context containing the newly-created span.
//
// If the context.Context provided in `ctx` contains a Span then the newly-created
@@ -518,8 +537,15 @@ type Tracer interface {
// at runtime from its users or it can simply use the globally registered one
// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
//
-// Warning: methods may be added to this interface in minor releases.
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
type TracerProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.TracerProvider
+
// Tracer returns a unique Tracer scoped to be used by instrumentation code
// to trace computational workflows. The scope and identity of that
// instrumentation code is uniquely defined by the name and options passed.
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index ca68a82e5f7..db936ba5b73 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -17,20 +17,14 @@ package trace // import "go.opentelemetry.io/otel/trace"
import (
"encoding/json"
"fmt"
- "regexp"
"strings"
)
const (
maxListMembers = 32
- listDelimiter = ","
-
- // based on the W3C Trace Context specification, see
- // https://www.w3.org/TR/trace-context-1/#tracestate-header
- noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
- withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
+ listDelimiters = ","
+ memberDelimiter = "="
errInvalidKey errorConst = "invalid tracestate key"
errInvalidValue errorConst = "invalid tracestate value"
@@ -39,43 +33,138 @@ const (
errDuplicate errorConst = "duplicate list-member in tracestate"
)
-var (
- keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`)
- valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`)
- memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
-)
-
type member struct {
Key string
Value string
}
+// according to (chr = %x20 / (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) ),
+// this means (chr = %x20-2B / %x2D-3C / %x3E-7E) .
+func checkValueChar(v byte) bool {
+ return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) .
+func checkValueLast(v byte) bool {
+ return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+// value = (0*255(chr)) nblk-chr
+// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+// chr = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+ n := len(val)
+ if n == 0 || n > 256 {
+ return false
+ }
+ for i := 0; i < n-1; i++ {
+ if !checkValueChar(val[i]) {
+ return false
+ }
+ }
+ return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+ // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+ for _, v := range key {
+ if isAlphaNum(byte(v)) {
+ continue
+ }
+ switch v {
+ case '_', '-', '*', '/':
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+// according to
+//
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// param n is the maximum length of the remaining part: 255 for simple-key or 13 for system-id.
+func checkKeyPart(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ first := key[0] // key's first char
+ ret := len(key[1:]) <= n
+ ret = ret && first >= 'a' && first <= 'z'
+ return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+ if c >= 'a' && c <= 'z' {
+ return true
+ }
+ return c >= '0' && c <= '9'
+}
+
+// according to
+//
+// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// param n is the maximum length of the remaining part, which should be exactly 240.
+func checkKeyTenant(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+// key = simple-key / multi-tenant-key
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// multi-tenant-key = tenant-id "@" system-id
+// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// lcalpha = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
+func checkKey(key string) bool {
+ tenant, system, ok := strings.Cut(key, "@")
+ if !ok {
+ return checkKeyPart(key, 255)
+ }
+ return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
+}
+
func newMember(key, value string) (member, error) {
- if !keyRe.MatchString(key) {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+ if !checkKey(key) {
+ return member{}, errInvalidKey
}
- if !valueRe.MatchString(value) {
- return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
+ if !checkValue(value) {
+ return member{}, errInvalidValue
}
return member{Key: key, Value: value}, nil
}
func parseMember(m string) (member, error) {
- matches := memberRe.FindStringSubmatch(m)
- if len(matches) != 5 {
+ key, val, ok := strings.Cut(m, memberDelimiter)
+ if !ok {
return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
}
-
- return member{
- Key: matches[1],
- Value: matches[4],
- }, nil
+ key = strings.TrimLeft(key, " \t")
+ val = strings.TrimRight(val, " \t")
+ result, e := newMember(key, val)
+ if e != nil {
+ return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+ }
+ return result, nil
}
// String encodes member into a string compliant with the W3C Trace Context
// specification.
func (m member) String() string {
- return fmt.Sprintf("%s=%s", m.Key, m.Value)
+ return m.Key + "=" + m.Value
}
// TraceState provides additional vendor-specific trace identification
@@ -99,8 +188,8 @@ var _ json.Marshaler = TraceState{}
// ParseTraceState attempts to decode a TraceState from the passed
// string. It returns an error if the input is invalid according to the W3C
// Trace Context specification.
-func ParseTraceState(tracestate string) (TraceState, error) {
- if tracestate == "" {
+func ParseTraceState(ts string) (TraceState, error) {
+ if ts == "" {
return TraceState{}, nil
}
@@ -110,7 +199,9 @@ func ParseTraceState(tracestate string) (TraceState, error) {
var members []member
found := make(map[string]struct{})
- for _, memberStr := range strings.Split(tracestate, listDelimiter) {
+ for ts != "" {
+ var memberStr string
+ memberStr, ts, _ = strings.Cut(ts, listDelimiters)
if len(memberStr) == 0 {
continue
}
@@ -143,11 +234,29 @@ func (ts TraceState) MarshalJSON() ([]byte, error) {
// Trace Context specification. The returned string will be invalid if the
// TraceState contains any invalid members.
func (ts TraceState) String() string {
- members := make([]string, len(ts.list))
- for i, m := range ts.list {
- members[i] = m.String()
+ if len(ts.list) == 0 {
+ return ""
+ }
+ var n int
+ n += len(ts.list) // member delimiters: '='
+ n += len(ts.list) - 1 // list delimiters: ','
+ for _, mem := range ts.list {
+ n += len(mem.Key)
+ n += len(mem.Value)
}
- return strings.Join(members, listDelimiter)
+
+ var sb strings.Builder
+ sb.Grow(n)
+ _, _ = sb.WriteString(ts.list[0].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[0].Value)
+ for i := 1; i < len(ts.list); i++ {
+ _ = sb.WriteByte(listDelimiters[0])
+ _, _ = sb.WriteString(ts.list[i].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[i].Value)
+ }
+ return sb.String()
}
// Get returns the value paired with key from the corresponding TraceState
@@ -179,15 +288,25 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) {
if err != nil {
return ts, err
}
-
- cTS := ts.Delete(key)
- if cTS.Len()+1 <= maxListMembers {
- cTS.list = append(cTS.list, member{})
+ n := len(ts.list)
+ found := n
+ for i := range ts.list {
+ if ts.list[i].Key == key {
+ found = i
+ }
+ }
+ cTS := TraceState{}
+ if found == n && n < maxListMembers {
+ cTS.list = make([]member, n+1)
+ } else {
+ cTS.list = make([]member, n)
}
- // When the number of members exceeds capacity, drop the "right-most".
- copy(cTS.list[1:], cTS.list)
cTS.list[0] = m
-
+ // When the number of members exceeds capacity, drop the "right-most".
+ copy(cTS.list[1:], ts.list[0:found])
+ if found < n {
+ copy(cTS.list[1+found:], ts.list[found+1:])
+ }
return cTS, nil
}
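
The regular-expression matchers above are replaced by hand-rolled byte checks and strings.Cut, but the observable behavior of the package is unchanged. A usage sketch (assumed, with made-up member keys) of the functions touched by this hunk:

    package main
    
    import (
    	"fmt"
    
    	"go.opentelemetry.io/otel/trace"
    )
    
    func main() {
    	// Keys must be lowercase; "tenant@system" exercises the multi-tenant form.
    	ts, err := trace.ParseTraceState("vendora=x,tenant@system=y")
    	if err != nil {
    		panic(err)
    	}
    
    	// Insert places the new member first; an existing key would be moved to the front.
    	ts, _ = ts.Insert("vendorb", "z")
    	fmt.Println(ts.String()) // vendorb=z,vendora=x,tenant@system=y
    }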
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index ad64e199672..38ba951ed79 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.19.0"
+ return "1.23.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 7d212769240..028393f078d 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -14,20 +14,25 @@
module-sets:
stable-v1:
- version: v1.19.0
+ version: v1.23.0
modules:
- go.opentelemetry.io/otel
+ - go.opentelemetry.io/otel/bridge/opencensus
+ - go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/bridge/opentracing
- go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/example/dice
- - go.opentelemetry.io/otel/example/fib
- go.opentelemetry.io/otel/example/namedtracer
+ - go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/otel-collector
- go.opentelemetry.io/otel/example/passthrough
- go.opentelemetry.io/otel/example/zipkin
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+ - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- go.opentelemetry.io/otel/exporters/zipkin
- go.opentelemetry.io/otel/metric
@@ -35,18 +40,10 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.42.0
+ version: v0.45.1
modules:
- - go.opentelemetry.io/otel/bridge/opencensus
- - go.opentelemetry.io/otel/bridge/opencensus/test
- - go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
- - go.opentelemetry.io/otel/example/view
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/prometheus
- - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
experimental-schema:
version: v0.0.7
modules:
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
deleted file mode 100644
index d33c8890fc5..00000000000
--- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.13
-
-package poly1305
-
-// Generic fallbacks for the math/bits intrinsics, copied from
-// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had
-// variable time fallbacks until Go 1.13.
-
-func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
- sum = x + y + carry
- carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
- return
-}
-
-func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
- diff = x - y - borrow
- borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
- return
-}
-
-func bitsMul64(x, y uint64) (hi, lo uint64) {
- const mask32 = 1<<32 - 1
- x0 := x & mask32
- x1 := x >> 32
- y0 := y & mask32
- y1 := y >> 32
- w0 := x0 * y0
- t := x1*y0 + w0>>32
- w1 := t & mask32
- w2 := t >> 32
- w1 += x0 * y1
- hi = x1*y1 + w2 + w1>>32
- lo = x * y
- return
-}
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
deleted file mode 100644
index 495c1fa6972..00000000000
--- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.13
-
-package poly1305
-
-import "math/bits"
-
-func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
- return bits.Add64(x, y, carry)
-}
-
-func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
- return bits.Sub64(x, y, borrow)
-}
-
-func bitsMul64(x, y uint64) (hi, lo uint64) {
- return bits.Mul64(x, y)
-}
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
index e041da5ea3e..ec2202bd7d5 100644
--- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
+++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
@@ -7,7 +7,10 @@
package poly1305
-import "encoding/binary"
+import (
+ "encoding/binary"
+ "math/bits"
+)
// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
// for a 64 bytes message is approximately
@@ -114,13 +117,13 @@ type uint128 struct {
}
func mul64(a, b uint64) uint128 {
- hi, lo := bitsMul64(a, b)
+ hi, lo := bits.Mul64(a, b)
return uint128{lo, hi}
}
func add128(a, b uint128) uint128 {
- lo, c := bitsAdd64(a.lo, b.lo, 0)
- hi, c := bitsAdd64(a.hi, b.hi, c)
+ lo, c := bits.Add64(a.lo, b.lo, 0)
+ hi, c := bits.Add64(a.hi, b.hi, c)
if c != 0 {
panic("poly1305: unexpected overflow")
}
@@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) {
// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
// add 1 to the most significant (2¹²⁸) limb, h2.
if len(msg) >= TagSize {
- h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
- h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
+ h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
+ h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
h2 += c + 1
msg = msg[TagSize:]
@@ -165,8 +168,8 @@ func updateGeneric(state *macState, msg []byte) {
copy(buf[:], msg)
buf[len(msg)] = 1
- h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
- h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
+ h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
+ h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
h2 += c
msg = nil
@@ -219,9 +222,9 @@ func updateGeneric(state *macState, msg []byte) {
m3 := h2r1
t0 := m0.lo
- t1, c := bitsAdd64(m1.lo, m0.hi, 0)
- t2, c := bitsAdd64(m2.lo, m1.hi, c)
- t3, _ := bitsAdd64(m3.lo, m2.hi, c)
+ t1, c := bits.Add64(m1.lo, m0.hi, 0)
+ t2, c := bits.Add64(m2.lo, m1.hi, c)
+ t3, _ := bits.Add64(m3.lo, m2.hi, c)
// Now we have the result as 4 64-bit limbs, and we need to reduce it
// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
@@ -243,14 +246,14 @@ func updateGeneric(state *macState, msg []byte) {
// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
- h0, c = bitsAdd64(h0, cc.lo, 0)
- h1, c = bitsAdd64(h1, cc.hi, c)
+ h0, c = bits.Add64(h0, cc.lo, 0)
+ h1, c = bits.Add64(h1, cc.hi, c)
h2 += c
cc = shiftRightBy2(cc)
- h0, c = bitsAdd64(h0, cc.lo, 0)
- h1, c = bitsAdd64(h1, cc.hi, c)
+ h0, c = bits.Add64(h0, cc.lo, 0)
+ h1, c = bits.Add64(h1, cc.hi, c)
h2 += c
// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
@@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
// result if the subtraction underflows, and t otherwise.
- hMinusP0, b := bitsSub64(h0, p0, 0)
- hMinusP1, b := bitsSub64(h1, p1, b)
- _, b = bitsSub64(h2, p2, b)
+ hMinusP0, b := bits.Sub64(h0, p0, 0)
+ hMinusP1, b := bits.Sub64(h1, p1, b)
+ _, b = bits.Sub64(h2, p2, b)
// h = h if h < p else h - p
h0 = select64(b, h0, hMinusP0)
@@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
//
// by just doing a wide addition with the 128 low bits of h and discarding
// the overflow.
- h0, c := bitsAdd64(h0, s[0], 0)
- h1, _ = bitsAdd64(h1, s[1], c)
+ h0, c := bits.Add64(h0, s[0], 0)
+ h1, _ = bits.Add64(h1, s[1], c)
binary.LittleEndian.PutUint64(out[0:8], h0)
binary.LittleEndian.PutUint64(out[8:16], h1)
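
The removed bits_compat.go and bits_go1.13.go shims are no longer needed now that the module targets Go releases where math/bits has intrinsic support, so the generic Poly1305 code calls the standard library directly. A tiny standalone sketch (not from this diff) of the calls that replace bitsAdd64/bitsMul64:

    package main
    
    import (
    	"fmt"
    	"math/bits"
    )
    
    func main() {
    	hi, lo := bits.Mul64(1<<40, 1<<30)         // full 128-bit product: hi = 1<<6, lo = 0
    	sum, carry := bits.Add64(^uint64(0), 1, 0) // wraps to 0 with carry-out 1
    	fmt.Println(hi, lo, sum, carry)            // 64 0 0 1
    }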
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
index d2ca5deeb9f..b3c1699bff5 100644
--- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
+++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
@@ -19,15 +19,14 @@
#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \
MULLD r0, h0, t0; \
- MULLD r0, h1, t4; \
MULHDU r0, h0, t1; \
+ MULLD r0, h1, t4; \
MULHDU r0, h1, t5; \
ADDC t4, t1, t1; \
MULLD r0, h2, t2; \
- ADDZE t5; \
MULHDU r1, h0, t4; \
MULLD r1, h0, h0; \
- ADD t5, t2, t2; \
+ ADDE t5, t2, t2; \
ADDC h0, t1, t1; \
MULLD h2, r1, t3; \
ADDZE t4, h0; \
@@ -37,13 +36,11 @@
ADDE t5, t3, t3; \
ADDC h0, t2, t2; \
MOVD $-4, t4; \
- MOVD t0, h0; \
- MOVD t1, h1; \
ADDZE t3; \
- ANDCC $3, t2, h2; \
- AND t2, t4, t0; \
+ RLDICL $0, t2, $62, h2; \
+ AND t2, t4, h0; \
ADDC t0, h0, h0; \
- ADDE t3, h1, h1; \
+ ADDE t3, t1, h1; \
SLD $62, t3, t4; \
SRD $2, t2; \
ADDZE h2; \
@@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32
loop:
POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22)
+ PCALIGN $16
multiply:
POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21)
ADD $-16, R5
diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
index 4269ed113be..bf2259537d2 100644
--- a/vendor/golang.org/x/crypto/ocsp/ocsp.go
+++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go
@@ -279,21 +279,22 @@ func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier {
// This is the exposed reflection of the internal OCSP structures.
-// The status values that can be expressed in OCSP. See RFC 6960.
+// The status values that can be expressed in OCSP. See RFC 6960.
+// These are used for the Response.Status field.
const (
// Good means that the certificate is valid.
- Good = iota
+ Good = 0
// Revoked means that the certificate has been deliberately revoked.
- Revoked
+ Revoked = 1
// Unknown means that the OCSP responder doesn't know about the certificate.
- Unknown
+ Unknown = 2
// ServerFailed is unused and was never used (see
// https://go-review.googlesource.com/#/c/18944). ParseResponse will
// return a ResponseError when an error response is parsed.
- ServerFailed
+ ServerFailed = 3
)
-// The enumerated reasons for revoking a certificate. See RFC 5280.
+// The enumerated reasons for revoking a certificate. See RFC 5280.
const (
Unspecified = 0
KeyCompromise = 1
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index c1f6b90dc32..e2b298d8593 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
}
func (fr *Framer) maxHeaderStringLen() int {
- v := fr.maxHeaderListSize()
- if uint32(int(v)) == v {
- return int(v)
+ v := int(fr.maxHeaderListSize())
+ if v < 0 {
+ // If maxHeaderListSize overflows an int, use no limit (0).
+ return 0
}
- // They had a crazy big number for MaxHeaderBytes anyway,
- // so give them unlimited header lengths:
- return 0
+ return v
}
// readMetaFrame returns 0 or more CONTINUATION frames from fr and
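
The rewritten maxHeaderStringLen performs the same overflow guard as before, just more directly: convert once and treat a negative result (possible when int is 32 bits) as "no limit". A minimal illustration of the pattern, using a hypothetical helper name:

    package main
    
    import "fmt"
    
    // maxLen mirrors the guard in maxHeaderStringLen: a uint32 larger than the
    // platform's MaxInt becomes negative after conversion, which is mapped to 0
    // (meaning "unlimited").
    func maxLen(size uint32) int {
    	v := int(size)
    	if v < 0 {
    		return 0
    	}
    	return v
    }
    
    func main() {
    	fmt.Println(maxLen(1 << 20)) // 1048576 on all platforms
    }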
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
index 12b12a30c55..02ccd08a770 100644
--- a/vendor/golang.org/x/oauth2/google/default.go
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -12,6 +12,7 @@ import (
"os"
"path/filepath"
"runtime"
+ "sync"
"time"
"cloud.google.com/go/compute/metadata"
@@ -41,12 +42,20 @@ type Credentials struct {
// running on Google Cloud Platform.
JSON []byte
+ udMu sync.Mutex // guards universeDomain
// universeDomain is the default service domain for a given Cloud universe.
universeDomain string
}
// UniverseDomain returns the default service domain for a given Cloud universe.
+//
// The default value is "googleapis.com".
+//
+// Deprecated: Use (*Credentials).GetUniverseDomain() instead, which supports
+// obtaining the universe domain when authenticating via the GCE metadata server.
+// Unlike GetUniverseDomain, this method, UniverseDomain, always returns the
+// default value when authenticating via the GCE metadata server.
+// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa).
func (c *Credentials) UniverseDomain() string {
if c.universeDomain == "" {
return universeDomainDefault
@@ -54,6 +63,55 @@ func (c *Credentials) UniverseDomain() string {
return c.universeDomain
}
+// GetUniverseDomain returns the default service domain for a given Cloud
+// universe.
+//
+// The default value is "googleapis.com".
+//
+// It obtains the universe domain from the attached service account on GCE when
+// authenticating via the GCE metadata server. See also [The attached service
+// account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa).
+// If the GCE metadata server returns a 404 error, the default value is
+// returned. If the GCE metadata server returns an error other than 404, the
+// error is returned.
+func (c *Credentials) GetUniverseDomain() (string, error) {
+ c.udMu.Lock()
+ defer c.udMu.Unlock()
+ if c.universeDomain == "" && metadata.OnGCE() {
+ // If we're on Google Compute Engine, an App Engine standard second
+ // generation runtime, or App Engine flexible, use the metadata server.
+ err := c.computeUniverseDomain()
+ if err != nil {
+ return "", err
+ }
+ }
+ // If not on Google Compute Engine, or in case of any non-error path in
+ // computeUniverseDomain that did not set universeDomain, set the default
+ // universe domain.
+ if c.universeDomain == "" {
+ c.universeDomain = universeDomainDefault
+ }
+ return c.universeDomain, nil
+}
+
+// computeUniverseDomain fetches the default service domain for a given Cloud
+// universe from Google Compute Engine (GCE)'s metadata server. It's only valid
+// to use this method if your program is running on a GCE instance.
+func (c *Credentials) computeUniverseDomain() error {
+ var err error
+ c.universeDomain, err = metadata.Get("universe/universe_domain")
+ if err != nil {
+ if _, ok := err.(metadata.NotDefinedError); ok {
+ // http.StatusNotFound (404)
+ c.universeDomain = universeDomainDefault
+ return nil
+ } else {
+ return err
+ }
+ }
+ return nil
+}
+
// DefaultCredentials is the old name of Credentials.
//
// Deprecated: use Credentials instead.
@@ -91,6 +149,12 @@ type CredentialsParams struct {
// Note: This option is currently only respected when using credentials
// fetched from the GCE metadata server.
EarlyTokenRefresh time.Duration
+
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ // Only supported in authentication flows that support universe domains.
+ // This value takes precedence over a universe domain explicitly specified
+ // in a credentials config file or by the GCE metadata server. Optional.
+ UniverseDomain string
}
func (params CredentialsParams) deepCopy() CredentialsParams {
@@ -175,8 +239,9 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar
if metadata.OnGCE() {
id, _ := metadata.ProjectID()
return &Credentials{
- ProjectID: id,
- TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...),
+ ProjectID: id,
+ TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...),
+ universeDomain: params.UniverseDomain,
}, nil
}
@@ -217,6 +282,9 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params
}
universeDomain := f.UniverseDomain
+ if params.UniverseDomain != "" {
+ universeDomain = params.UniverseDomain
+ }
// Authorized user credentials are only supported in the googleapis.com universe.
if f.Type == userCredentialsKey {
universeDomain = universeDomainDefault
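
A usage sketch (assumed) of the new GetUniverseDomain method added above: on GCE it may consult the metadata server for the attached service account's universe domain, and elsewhere it falls back to "googleapis.com".

    package main
    
    import (
    	"context"
    	"log"
    
    	"golang.org/x/oauth2/google"
    )
    
    func main() {
    	creds, err := google.FindDefaultCredentials(context.Background())
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Unlike the deprecated UniverseDomain, this can return an error from the
    	// GCE metadata server.
    	domain, err := creds.GetUniverseDomain()
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Println("universe domain:", domain)
    }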
diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go
index 6497dc022ef..843d1c33020 100644
--- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go
+++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go
@@ -19,7 +19,7 @@ import (
"time"
)
-var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken")
+var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials\\..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken")
const (
executableSupportedMaxVersion = 1
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index b18efb743fe..948a3ee63d4 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -4,6 +4,9 @@
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
+//
+// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
+// returning errors.
package errgroup
import (
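
The added doc sentence positions errgroup.Group relative to sync.WaitGroup; a short sketch (assumed) of that pattern:

    package main
    
    import (
    	"fmt"
    
    	"golang.org/x/sync/errgroup"
    )
    
    func main() {
    	var g errgroup.Group
    	for _, name := range []string{"a", "b", "c"} {
    		name := name // capture loop variable
    		g.Go(func() error {
    			if name == "b" {
    				return fmt.Errorf("task %s failed", name)
    			}
    			return nil
    		})
    	}
    	// Like WaitGroup.Wait, but also reports the first non-nil error.
    	if err := g.Wait(); err != nil {
    		fmt.Println(err)
    	}
    }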
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 6202638bae8..fdcaa974d23 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -248,6 +248,7 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
#include
@@ -283,10 +284,6 @@ struct ltchars {
#include
#endif
-#ifndef MSG_FASTOPEN
-#define MSG_FASTOPEN 0x20000000
-#endif
-
#ifndef PTRACE_GETREGS
#define PTRACE_GETREGS 0xc
#endif
@@ -295,14 +292,6 @@ struct ltchars {
#define PTRACE_SETREGS 0xd
#endif
-#ifndef SOL_NETLINK
-#define SOL_NETLINK 270
-#endif
-
-#ifndef SOL_SMC
-#define SOL_SMC 286
-#endif
-
#ifdef SOL_BLUETOOTH
// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
// but it is already in bluetooth_linux.go
@@ -319,10 +308,23 @@ struct ltchars {
#undef TIPC_WAIT_FOREVER
#define TIPC_WAIT_FOREVER 0xffffffff
-// Copied from linux/l2tp.h
-// Including linux/l2tp.h here causes conflicts between linux/in.h
-// and netinet/in.h included via net/route.h above.
-#define IPPROTO_L2TP 115
+// Copied from linux/netfilter/nf_nat.h
+// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h
+// and netinet/in.h.
+#define NF_NAT_RANGE_MAP_IPS (1 << 0)
+#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1)
+#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2)
+#define NF_NAT_RANGE_PERSISTENT (1 << 3)
+#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4)
+#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5)
+#define NF_NAT_RANGE_NETMAP (1 << 6)
+#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
+ (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
+#define NF_NAT_RANGE_MASK \
+ (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
+ NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
+ NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \
+ NF_NAT_RANGE_NETMAP)
// Copied from linux/hid.h.
// Keep in sync with the size of the referenced fields.
@@ -582,7 +584,7 @@ ccflags="$@"
$2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ ||
$2 ~ /^KEYCTL_/ ||
$2 ~ /^PERF_/ ||
- $2 ~ /^SECCOMP_MODE_/ ||
+ $2 ~ /^SECCOMP_/ ||
$2 ~ /^SEEK_/ ||
$2 ~ /^SCHED_/ ||
$2 ~ /^SPLICE_/ ||
@@ -603,6 +605,9 @@ ccflags="$@"
$2 ~ /^FSOPT_/ ||
$2 ~ /^WDIO[CFS]_/ ||
$2 ~ /^NFN/ ||
+ $2 !~ /^NFT_META_IIFTYPE/ &&
+ $2 ~ /^NFT_/ ||
+ $2 ~ /^NF_NAT_/ ||
$2 ~ /^XDP_/ ||
$2 ~ /^RWF_/ ||
$2 ~ /^(HDIO|WIN|SMART)_/ ||
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index c73cfe2f10b..36bf8399f4f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -1785,6 +1785,8 @@ const (
LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20
LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000
LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2
+ LANDLOCK_ACCESS_NET_BIND_TCP = 0x1
+ LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2
LANDLOCK_CREATE_RULESET_VERSION = 0x1
LINUX_REBOOT_CMD_CAD_OFF = 0x0
LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef
@@ -2127,6 +2129,60 @@ const (
NFNL_SUBSYS_QUEUE = 0x3
NFNL_SUBSYS_ULOG = 0x4
NFS_SUPER_MAGIC = 0x6969
+ NFT_CHAIN_FLAGS = 0x7
+ NFT_CHAIN_MAXNAMELEN = 0x100
+ NFT_CT_MAX = 0x17
+ NFT_DATA_RESERVED_MASK = 0xffffff00
+ NFT_DATA_VALUE_MAXLEN = 0x40
+ NFT_EXTHDR_OP_MAX = 0x4
+ NFT_FIB_RESULT_MAX = 0x3
+ NFT_INNER_MASK = 0xf
+ NFT_LOGLEVEL_MAX = 0x8
+ NFT_NAME_MAXLEN = 0x100
+ NFT_NG_MAX = 0x1
+ NFT_OBJECT_CONNLIMIT = 0x5
+ NFT_OBJECT_COUNTER = 0x1
+ NFT_OBJECT_CT_EXPECT = 0x9
+ NFT_OBJECT_CT_HELPER = 0x3
+ NFT_OBJECT_CT_TIMEOUT = 0x7
+ NFT_OBJECT_LIMIT = 0x4
+ NFT_OBJECT_MAX = 0xa
+ NFT_OBJECT_QUOTA = 0x2
+ NFT_OBJECT_SECMARK = 0x8
+ NFT_OBJECT_SYNPROXY = 0xa
+ NFT_OBJECT_TUNNEL = 0x6
+ NFT_OBJECT_UNSPEC = 0x0
+ NFT_OBJ_MAXNAMELEN = 0x100
+ NFT_OSF_MAXGENRELEN = 0x10
+ NFT_QUEUE_FLAG_BYPASS = 0x1
+ NFT_QUEUE_FLAG_CPU_FANOUT = 0x2
+ NFT_QUEUE_FLAG_MASK = 0x3
+ NFT_REG32_COUNT = 0x10
+ NFT_REG32_SIZE = 0x4
+ NFT_REG_MAX = 0x4
+ NFT_REG_SIZE = 0x10
+ NFT_REJECT_ICMPX_MAX = 0x3
+ NFT_RT_MAX = 0x4
+ NFT_SECMARK_CTX_MAXLEN = 0x100
+ NFT_SET_MAXNAMELEN = 0x100
+ NFT_SOCKET_MAX = 0x3
+ NFT_TABLE_F_MASK = 0x3
+ NFT_TABLE_MAXNAMELEN = 0x100
+ NFT_TRACETYPE_MAX = 0x3
+ NFT_TUNNEL_F_MASK = 0x7
+ NFT_TUNNEL_MAX = 0x1
+ NFT_TUNNEL_MODE_MAX = 0x2
+ NFT_USERDATA_MAXLEN = 0x100
+ NFT_XFRM_KEY_MAX = 0x6
+ NF_NAT_RANGE_MAP_IPS = 0x1
+ NF_NAT_RANGE_MASK = 0x7f
+ NF_NAT_RANGE_NETMAP = 0x40
+ NF_NAT_RANGE_PERSISTENT = 0x8
+ NF_NAT_RANGE_PROTO_OFFSET = 0x20
+ NF_NAT_RANGE_PROTO_RANDOM = 0x4
+ NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14
+ NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10
+ NF_NAT_RANGE_PROTO_SPECIFIED = 0x2
NILFS_SUPER_MAGIC = 0x3434
NL0 = 0x0
NL1 = 0x100
@@ -2411,6 +2467,7 @@ const (
PR_MCE_KILL_GET = 0x22
PR_MCE_KILL_LATE = 0x0
PR_MCE_KILL_SET = 0x1
+ PR_MDWE_NO_INHERIT = 0x2
PR_MDWE_REFUSE_EXEC_GAIN = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b
@@ -2615,8 +2672,9 @@ const (
RTAX_FEATURES = 0xc
RTAX_FEATURE_ALLFRAG = 0x8
RTAX_FEATURE_ECN = 0x1
- RTAX_FEATURE_MASK = 0xf
+ RTAX_FEATURE_MASK = 0x1f
RTAX_FEATURE_SACK = 0x2
+ RTAX_FEATURE_TCP_USEC_TS = 0x10
RTAX_FEATURE_TIMESTAMP = 0x4
RTAX_HOPLIMIT = 0xa
RTAX_INITCWND = 0xb
@@ -2859,9 +2917,38 @@ const (
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x1d
SC_LOG_FLUSH = 0x100000
+ SECCOMP_ADDFD_FLAG_SEND = 0x2
+ SECCOMP_ADDFD_FLAG_SETFD = 0x1
+ SECCOMP_FILTER_FLAG_LOG = 0x2
+ SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8
+ SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4
+ SECCOMP_FILTER_FLAG_TSYNC = 0x1
+ SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10
+ SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20
+ SECCOMP_GET_ACTION_AVAIL = 0x2
+ SECCOMP_GET_NOTIF_SIZES = 0x3
+ SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100
+ SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101
+ SECCOMP_IOC_MAGIC = '!'
SECCOMP_MODE_DISABLED = 0x0
SECCOMP_MODE_FILTER = 0x2
SECCOMP_MODE_STRICT = 0x1
+ SECCOMP_RET_ACTION = 0x7fff0000
+ SECCOMP_RET_ACTION_FULL = 0xffff0000
+ SECCOMP_RET_ALLOW = 0x7fff0000
+ SECCOMP_RET_DATA = 0xffff
+ SECCOMP_RET_ERRNO = 0x50000
+ SECCOMP_RET_KILL = 0x0
+ SECCOMP_RET_KILL_PROCESS = 0x80000000
+ SECCOMP_RET_KILL_THREAD = 0x0
+ SECCOMP_RET_LOG = 0x7ffc0000
+ SECCOMP_RET_TRACE = 0x7ff00000
+ SECCOMP_RET_TRAP = 0x30000
+ SECCOMP_RET_USER_NOTIF = 0x7fc00000
+ SECCOMP_SET_MODE_FILTER = 0x1
+ SECCOMP_SET_MODE_STRICT = 0x0
+ SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1
+ SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1
SECRETMEM_MAGIC = 0x5345434d
SECURITYFS_MAGIC = 0x73636673
SEEK_CUR = 0x1
@@ -3021,6 +3108,7 @@ const (
SOL_TIPC = 0x10f
SOL_TLS = 0x11a
SOL_UDP = 0x11
+ SOL_VSOCK = 0x11f
SOL_X25 = 0x106
SOL_XDP = 0x11b
SOMAXCONN = 0x1000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 4920821cf3b..42ff8c3c1b0 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index a0c1e411275..dca436004fa 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -282,6 +282,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index c63985560f6..5cca668ac30 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -288,6 +288,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 47cc62e25c1..d8cae6d1534 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -278,6 +278,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 27ac4a09e22..28e39afdcb4 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -275,6 +275,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 54694642a5d..cd66e92cb42 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x80
SIOCATMARK = 0x40047307
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 3adb81d7582..c1595eba78e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x80
SIOCATMARK = 0x40047307
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 2dfe98f0d1b..ee9456b0da7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x80
SIOCATMARK = 0x40047307
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index f5398f84f04..8cfca81e1b5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x80
SIOCATMARK = 0x40047307
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index c54f152d68f..60b0deb3af7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -336,6 +336,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 76057dc72fb..f90aa7281bf 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -340,6 +340,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index e0c3725e2b8..ba9e0150338 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -340,6 +340,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 18f2813ed54..07cdfd6e9fd 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -272,6 +272,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 11619d4ec88..2f1dd214a74 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -344,6 +344,9 @@ const (
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 396d994da79..f40519d9018 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -335,6 +335,9 @@ const (
SCM_TIMESTAMPNS = 0x21
SCM_TXTIME = 0x3f
SCM_WIFI_STATUS = 0x25
+ SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
+ SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
+ SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x400000
SFD_NONBLOCK = 0x4000
SF_FP = 0x38
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index a1d061597cc..9dc42410b78 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 5b2a7409778..0d3a0751cd4 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index f6eda1344a8..c39f7776db3 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
index 55df20ae9d8..57571d072fe 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
index 8c1155cbc08..e62963e67e2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
index 7cc80c58d98..00831354c82 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
index 0688737f494..79029ed5848 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index fcf3ecbddee..0cc3ce496e2 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -448,4 +448,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index f56dc2504ae..856d92d69ef 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -371,4 +371,7 @@ const (
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 974bf246767..8d467094cf5 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -412,4 +412,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 39a2739e231..edc173244d0 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -315,4 +315,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index cf9c9d77e10..445eba20615 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -309,4 +309,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 10b7362ef44..adba01bca70 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -432,4 +432,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 4450
SYS_CACHESTAT = 4451
SYS_FCHMODAT2 = 4452
+ SYS_MAP_SHADOW_STACK = 4453
+ SYS_FUTEX_WAKE = 4454
+ SYS_FUTEX_WAIT = 4455
+ SYS_FUTEX_REQUEUE = 4456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index cd4d8b4fd35..014c4e9c7a7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -362,4 +362,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 5450
SYS_CACHESTAT = 5451
SYS_FCHMODAT2 = 5452
+ SYS_MAP_SHADOW_STACK = 5453
+ SYS_FUTEX_WAKE = 5454
+ SYS_FUTEX_WAIT = 5455
+ SYS_FUTEX_REQUEUE = 5456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 2c0efca818b..ccc97d74d05 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -362,4 +362,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 5450
SYS_CACHESTAT = 5451
SYS_FCHMODAT2 = 5452
+ SYS_MAP_SHADOW_STACK = 5453
+ SYS_FUTEX_WAKE = 5454
+ SYS_FUTEX_WAIT = 5455
+ SYS_FUTEX_REQUEUE = 5456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index a72e31d391d..ec2b64a95d7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -432,4 +432,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 4450
SYS_CACHESTAT = 4451
SYS_FCHMODAT2 = 4452
+ SYS_MAP_SHADOW_STACK = 4453
+ SYS_FUTEX_WAKE = 4454
+ SYS_FUTEX_WAIT = 4455
+ SYS_FUTEX_REQUEUE = 4456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index c7d1e374713..21a839e338b 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -439,4 +439,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index f4d4838c870..c11121ec3b4 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -411,4 +411,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index b64f0e59114..909b631fcb4 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -411,4 +411,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 95711195a06..e49bed16ea6 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -316,4 +316,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index f94e943bc4f..66017d2d32b 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -377,4 +377,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index ba0c2bc5154..47bab18dced 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -390,4 +390,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452
+ SYS_MAP_SHADOW_STACK = 453
+ SYS_FUTEX_WAKE = 454
+ SYS_FUTEX_WAIT = 455
+ SYS_FUTEX_REQUEUE = 456
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index bbf8399ff58..dc0c955eecd 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -174,7 +174,8 @@ type FscryptPolicyV2 struct {
Contents_encryption_mode uint8
Filenames_encryption_mode uint8
Flags uint8
- _ [4]uint8
+ Log2_data_unit_size uint8
+ _ [3]uint8
Master_key_identifier [16]uint8
}
@@ -455,60 +456,63 @@ type Ucred struct {
}
type TCPInfo struct {
- State uint8
- Ca_state uint8
- Retransmits uint8
- Probes uint8
- Backoff uint8
- Options uint8
- Rto uint32
- Ato uint32
- Snd_mss uint32
- Rcv_mss uint32
- Unacked uint32
- Sacked uint32
- Lost uint32
- Retrans uint32
- Fackets uint32
- Last_data_sent uint32
- Last_ack_sent uint32
- Last_data_recv uint32
- Last_ack_recv uint32
- Pmtu uint32
- Rcv_ssthresh uint32
- Rtt uint32
- Rttvar uint32
- Snd_ssthresh uint32
- Snd_cwnd uint32
- Advmss uint32
- Reordering uint32
- Rcv_rtt uint32
- Rcv_space uint32
- Total_retrans uint32
- Pacing_rate uint64
- Max_pacing_rate uint64
- Bytes_acked uint64
- Bytes_received uint64
- Segs_out uint32
- Segs_in uint32
- Notsent_bytes uint32
- Min_rtt uint32
- Data_segs_in uint32
- Data_segs_out uint32
- Delivery_rate uint64
- Busy_time uint64
- Rwnd_limited uint64
- Sndbuf_limited uint64
- Delivered uint32
- Delivered_ce uint32
- Bytes_sent uint64
- Bytes_retrans uint64
- Dsack_dups uint32
- Reord_seen uint32
- Rcv_ooopack uint32
- Snd_wnd uint32
- Rcv_wnd uint32
- Rehash uint32
+ State uint8
+ Ca_state uint8
+ Retransmits uint8
+ Probes uint8
+ Backoff uint8
+ Options uint8
+ Rto uint32
+ Ato uint32
+ Snd_mss uint32
+ Rcv_mss uint32
+ Unacked uint32
+ Sacked uint32
+ Lost uint32
+ Retrans uint32
+ Fackets uint32
+ Last_data_sent uint32
+ Last_ack_sent uint32
+ Last_data_recv uint32
+ Last_ack_recv uint32
+ Pmtu uint32
+ Rcv_ssthresh uint32
+ Rtt uint32
+ Rttvar uint32
+ Snd_ssthresh uint32
+ Snd_cwnd uint32
+ Advmss uint32
+ Reordering uint32
+ Rcv_rtt uint32
+ Rcv_space uint32
+ Total_retrans uint32
+ Pacing_rate uint64
+ Max_pacing_rate uint64
+ Bytes_acked uint64
+ Bytes_received uint64
+ Segs_out uint32
+ Segs_in uint32
+ Notsent_bytes uint32
+ Min_rtt uint32
+ Data_segs_in uint32
+ Data_segs_out uint32
+ Delivery_rate uint64
+ Busy_time uint64
+ Rwnd_limited uint64
+ Sndbuf_limited uint64
+ Delivered uint32
+ Delivered_ce uint32
+ Bytes_sent uint64
+ Bytes_retrans uint64
+ Dsack_dups uint32
+ Reord_seen uint32
+ Rcv_ooopack uint32
+ Snd_wnd uint32
+ Rcv_wnd uint32
+ Rehash uint32
+ Total_rto uint16
+ Total_rto_recoveries uint16
+ Total_rto_time uint32
}
type CanFilter struct {
@@ -551,7 +555,7 @@ const (
SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
- SizeofTCPInfo = 0xf0
+ SizeofTCPInfo = 0xf8
SizeofCanFilter = 0x8
SizeofTCPRepairOpt = 0x8
)
@@ -3399,7 +3403,7 @@ const (
DEVLINK_PORT_FN_ATTR_STATE = 0x2
DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3
DEVLINK_PORT_FN_ATTR_CAPS = 0x4
- DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4
+ DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5
)
type FsverityDigest struct {
@@ -4183,7 +4187,8 @@ const (
)
type LandlockRulesetAttr struct {
- Access_fs uint64
+ Access_fs uint64
+ Access_net uint64
}
type LandlockPathBeneathAttr struct {
@@ -5134,7 +5139,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x1b
+ NL80211_FREQUENCY_ATTR_MAX = 0x1c
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
@@ -5547,7 +5552,7 @@ const (
NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2
NL80211_REGDOM_TYPE_INTERSECTION = 0x3
NL80211_REGDOM_TYPE_WORLD = 0x1
- NL80211_REG_RULE_ATTR_MAX = 0x7
+ NL80211_REG_RULE_ATTR_MAX = 0x8
NL80211_REKEY_DATA_AKM = 0x4
NL80211_REKEY_DATA_KCK = 0x2
NL80211_REKEY_DATA_KEK = 0x1
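The SizeofTCPInfo bump above accounts for the three new Total_rto* fields. A minimal, Linux-only sketch of how a caller typically reads this struct (the file-descriptor handling is illustrative, not taken from the vendored code):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// tcpRTT reads TCP_INFO for a connected TCP socket and returns the smoothed
// RTT in microseconds. On older kernels the trailing Total_rto* fields are
// simply left at zero.
func tcpRTT(fd int) (uint32, error) {
	info, err := unix.GetsockoptTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_INFO)
	if err != nil {
		return 0, err
	}
	return info.Rtt, nil
}

func main() {
	// fd 0 is not a TCP socket; this only demonstrates the call shape.
	if _, err := tcpRTT(0); err != nil {
		fmt.Println("expected error on a non-TCP fd:", err)
	}
}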
diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go
index b8ad1925068..d4577a42388 100644
--- a/vendor/golang.org/x/sys/windows/env_windows.go
+++ b/vendor/golang.org/x/sys/windows/env_windows.go
@@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) {
return nil, err
}
defer DestroyEnvironmentBlock(block)
- blockp := unsafe.Pointer(block)
- for {
- entry := UTF16PtrToString((*uint16)(blockp))
- if len(entry) == 0 {
- break
+ size := unsafe.Sizeof(*block)
+ for *block != 0 {
+ // find NUL terminator
+ end := unsafe.Pointer(block)
+ for *(*uint16)(end) != 0 {
+ end = unsafe.Add(end, size)
}
- env = append(env, entry)
- blockp = unsafe.Add(blockp, 2*(len(entry)+1))
+
+ entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size)
+ env = append(env, UTF16ToString(entry))
+ block = (*uint16)(unsafe.Add(end, size))
}
return env, nil
}
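The rewritten Token.Environ above walks a double-NUL-terminated block of UTF-16 entries with unsafe pointer arithmetic. A safe, standalone sketch of the same parsing over a plain []uint16 (example data hypothetical):

package main

import (
	"fmt"
	"unicode/utf16"
)

// splitEnvBlock splits a Windows environment block: each entry is a
// NUL-terminated UTF-16 string, and an empty entry terminates the block.
func splitEnvBlock(block []uint16) []string {
	var env []string
	start := 0
	for i := 0; i < len(block); i++ {
		if block[i] != 0 {
			continue
		}
		if i == start { // empty entry: end of block
			break
		}
		env = append(env, string(utf16.Decode(block[start:i])))
		start = i + 1
	}
	return env
}

func main() {
	// "A=1\x00B=2\x00\x00" encoded as UTF-16 code units.
	block := append(utf16.Encode([]rune("A=1")), 0)
	block = append(block, append(utf16.Encode([]rune("B=2")), 0, 0)...)
	fmt.Println(splitEnvBlock(block)) // [A=1 B=2]
}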
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 47dc5796769..6395a031d45 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string {
for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ {
ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p))
}
-
- return string(utf16.Decode(unsafe.Slice(p, n)))
+ return UTF16ToString(unsafe.Slice(p, n))
}
func Getpagesize() int { return 4096 }
@@ -194,6 +193,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW
//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW
//sys SetEndOfFile(handle Handle) (err error)
+//sys SetFileValidData(handle Handle, validDataLength int64) (err error)
//sys GetSystemTimeAsFileTime(time *Filetime)
//sys GetSystemTimePreciseAsFileTime(time *Filetime)
//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff]
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 146a1f0196f..e8791c82c30 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -342,6 +342,7 @@ var (
procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories")
procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW")
procSetEndOfFile = modkernel32.NewProc("SetEndOfFile")
+ procSetFileValidData = modkernel32.NewProc("SetFileValidData")
procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW")
procSetErrorMode = modkernel32.NewProc("SetErrorMode")
procSetEvent = modkernel32.NewProc("SetEvent")
@@ -2988,6 +2989,14 @@ func SetEndOfFile(handle Handle) (err error) {
return
}
+func SetFileValidData(handle Handle, validDataLength int64) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0)
if r1 == 0 {
diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go
index 829383f55b5..fbf4ef1c6e1 100644
--- a/vendor/google.golang.org/api/internal/cba.go
+++ b/vendor/google.golang.org/api/internal/cba.go
@@ -35,6 +35,7 @@ package internal
import (
"context"
"crypto/tls"
+ "errors"
"net"
"net/url"
"os"
@@ -53,6 +54,12 @@ const (
// Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false.
googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A"
+
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+)
+
+var (
+ errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com")
)
// getClientCertificateSourceAndEndpoint is a convenience function that invokes
@@ -67,6 +74,14 @@ func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source,
if err != nil {
return nil, "", err
}
+ // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359
+ if settings.Endpoint == "" && !settings.IsUniverseDomainGDU() && settings.DefaultEndpointTemplate != "" {
+ // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359
+ // if settings.DefaultEndpointTemplate == "" {
+ // return nil, "", errors.New("internaloption.WithDefaultEndpointTemplate is required if option.WithUniverseDomain is not googleapis.com")
+ // }
+ endpoint = resolvedDefaultEndpoint(settings)
+ }
return clientCertSource, endpoint, nil
}
@@ -80,9 +95,7 @@ type transportConfig struct {
func getTransportConfig(settings *DialSettings) (*transportConfig, error) {
clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings)
if err != nil {
- return &transportConfig{
- clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "",
- }, err
+ return nil, err
}
defaultTransportConfig := transportConfig{
clientCertSource: clientCertSource,
@@ -94,12 +107,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) {
if !shouldUseS2A(clientCertSource, settings) {
return &defaultTransportConfig, nil
}
-
- s2aMTLSEndpoint := settings.DefaultMTLSEndpoint
- // If there is endpoint override, honor it.
- if settings.Endpoint != "" {
- s2aMTLSEndpoint = endpoint
+ if !settings.IsUniverseDomainGDU() {
+ return nil, errUniverseNotSupportedMTLS
}
+
s2aAddress := GetS2AAddress()
if s2aAddress == "" {
return &defaultTransportConfig, nil
@@ -108,7 +119,7 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) {
clientCertSource: clientCertSource,
endpoint: endpoint,
s2aAddress: s2aAddress,
- s2aMTLSEndpoint: s2aMTLSEndpoint,
+ s2aMTLSEndpoint: settings.DefaultMTLSEndpoint,
}, nil
}
@@ -153,24 +164,41 @@ func isClientCertificateEnabled() bool {
// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz"
func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) {
if settings.Endpoint == "" {
- mtlsMode := getMTLSMode()
- if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
+ if isMTLS(clientCertSource) {
+ if !settings.IsUniverseDomainGDU() {
+ return "", errUniverseNotSupportedMTLS
+ }
return settings.DefaultMTLSEndpoint, nil
}
- return settings.DefaultEndpoint, nil
+ return resolvedDefaultEndpoint(settings), nil
}
if strings.Contains(settings.Endpoint, "://") {
// User passed in a full URL path, use it verbatim.
return settings.Endpoint, nil
}
- if settings.DefaultEndpoint == "" {
+ if resolvedDefaultEndpoint(settings) == "" {
// If DefaultEndpoint is not configured, use the user provided endpoint verbatim.
// This allows a naked "host[:port]" URL to be used with GRPC Direct Path.
return settings.Endpoint, nil
}
// Assume user-provided endpoint is host[:port], merge it with the default endpoint.
- return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint)
+ return mergeEndpoints(resolvedDefaultEndpoint(settings), settings.Endpoint)
+}
+
+func isMTLS(clientCertSource cert.Source) bool {
+ mtlsMode := getMTLSMode()
+ return mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto)
+}
+
+// resolvedDefaultEndpoint returns the DefaultEndpointTemplate merged with the
+// Universe Domain if the DefaultEndpointTemplate is set, otherwise returns the
+// deprecated DefaultEndpoint value.
+func resolvedDefaultEndpoint(settings *DialSettings) string {
+ if settings.DefaultEndpointTemplate == "" {
+ return settings.DefaultEndpoint
+ }
+ return strings.Replace(settings.DefaultEndpointTemplate, universeDomainPlaceholder, settings.GetUniverseDomain(), 1)
}
func getMTLSMode() string {
@@ -274,25 +302,15 @@ func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool {
if !isGoogleS2AEnabled() {
return false
}
- // If DefaultMTLSEndpoint is not set and no endpoint override, skip S2A.
- if settings.DefaultMTLSEndpoint == "" && settings.Endpoint == "" {
- return false
- }
- // If MTLS is not enabled for this endpoint, skip S2A.
- if !mtlsEndpointEnabledForS2A() {
+ // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A.
+ if settings.DefaultMTLSEndpoint == "" || settings.Endpoint != "" {
return false
}
// If custom HTTP client is provided, skip S2A.
if settings.HTTPClient != nil {
return false
}
- return true
-}
-
-// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection.
-var mtlsEndpointEnabledForS2A = func() bool {
- // TODO(xmenxk): determine this via discovery config.
- return true
+ return !settings.EnableDirectPath && !settings.EnableDirectPathXds
}
func isGoogleS2AEnabled() bool {
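resolvedDefaultEndpoint above is a single placeholder substitution over the endpoint template. A minimal standalone sketch (the template and domain values are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// resolveEndpoint mirrors the substitution: the literal UNIVERSE_DOMAIN
// placeholder in the template is replaced once with the configured domain.
func resolveEndpoint(template, universeDomain string) string {
	return strings.Replace(template, "UNIVERSE_DOMAIN", universeDomain, 1)
}

func main() {
	fmt.Println(resolveEndpoint("https://logging.UNIVERSE_DOMAIN/", "googleapis.com"))
	// Output: https://logging.googleapis.com/
}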
diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go
index 05165f333b0..b6489309851 100644
--- a/vendor/google.golang.org/api/internal/creds.go
+++ b/vendor/google.golang.org/api/internal/creds.go
@@ -16,6 +16,7 @@ import (
"time"
"golang.org/x/oauth2"
+ "google.golang.org/api/internal/cert"
"google.golang.org/api/internal/impersonate"
"golang.org/x/oauth2/google"
@@ -90,11 +91,11 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g
// Determine configurations for the OAuth2 transport, which is separate from the API transport.
// The OAuth2 transport and endpoint will be configured for mTLS if applicable.
- clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds))
+ clientCertSource, err := getClientCertificateSource(ds)
if err != nil {
return nil, err
}
- params.TokenURL = oauth2Endpoint
+ params.TokenURL = oAuth2Endpoint(clientCertSource)
if clientCertSource != nil {
tlsConfig := &tls.Config{
GetClientCertificate: clientCertSource,
@@ -124,22 +125,37 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g
return cred, err
}
+func oAuth2Endpoint(clientCertSource cert.Source) string {
+ if isMTLS(clientCertSource) {
+ return google.MTLSTokenURL
+ }
+ return google.Endpoint.TokenURL
+}
+
func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) {
- if (ds.EnableJwtWithScope || ds.HasCustomAudience()) &&
- ds.ImpersonationConfig == nil {
- // Check if JSON is a service account and if so create a self-signed JWT.
- var f struct {
- Type string `json:"type"`
- // The rest JSON fields are omitted because they are not used.
- }
- if err := json.Unmarshal(data, &f); err != nil {
- return false, err
- }
- return f.Type == serviceAccountKey, nil
+ // For non-GDU universe domains, token exchange is impossible and services
+ // must support self-signed JWTs with scopes.
+ if !ds.IsUniverseDomainGDU() {
+ return typeServiceAccount(data)
+ }
+ if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && ds.ImpersonationConfig == nil {
+ return typeServiceAccount(data)
}
return false, nil
}
+// typeServiceAccount checks if JSON data is for a service account.
+func typeServiceAccount(data []byte) (bool, error) {
+ var f struct {
+ Type string `json:"type"`
+ // The remaining JSON fields are omitted because they are not used.
+ }
+ if err := json.Unmarshal(data, &f); err != nil {
+ return false, err
+ }
+ return f.Type == serviceAccountKey, nil
+}
+
func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) {
if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() {
// Scopes are preferred in self-signed JWT unless the scope is not available
@@ -188,15 +204,6 @@ func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds *
}, nil
}
-// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport.
-func oauth2DialSettings(ds *DialSettings) *DialSettings {
- var ods DialSettings
- ods.DefaultEndpoint = google.Endpoint.TokenURL
- ods.DefaultMTLSEndpoint = google.MTLSTokenURL
- ods.ClientCertSource = ds.ClientCertSource
- return &ods
-}
-
// customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS.
func customHTTPClient(tlsConfig *tls.Config) *http.Client {
trans := baseTransport()
@@ -219,3 +226,14 @@ func baseTransport() *http.Transport {
ExpectContinueTimeout: 1 * time.Second,
}
}
+
+// ErrUniverseNotMatch composes an error string from the provided universe
+// domain sources (DialSettings and Credentials, respectively).
+func ErrUniverseNotMatch(settingsUD, credsUD string) error {
+ return fmt.Errorf(
+ "the configured universe domain (%q) does not match the universe "+
+ "domain found in the credentials (%q). If you haven't configured "+
+ "WithUniverseDomain explicitly, \"googleapis.com\" is the default",
+ settingsUD,
+ credsUD)
+}
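typeServiceAccount above inspects only the top-level "type" field of the credentials JSON. A minimal sketch of the same check with a literal key (the sample JSON is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

// isServiceAccountJSON reports whether the credentials JSON declares the
// service_account type; all other fields are ignored.
func isServiceAccountJSON(data []byte) (bool, error) {
	var f struct {
		Type string `json:"type"`
	}
	if err := json.Unmarshal(data, &f); err != nil {
		return false, err
	}
	return f.Type == "service_account", nil
}

func main() {
	ok, _ := isServiceAccountJSON([]byte(`{"type":"service_account","project_id":"demo"}`))
	fmt.Println(ok) // true
}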
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
index 84f9302dcfa..e17141a6f58 100644
--- a/vendor/google.golang.org/api/internal/settings.go
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -11,6 +11,7 @@ import (
"net/http"
"os"
"strconv"
+ "time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
@@ -19,7 +20,8 @@ import (
)
const (
- newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB"
+ newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB"
+ universeDomainDefault = "googleapis.com"
)
// DialSettings holds information needed to establish a connection with a
@@ -27,6 +29,7 @@ const (
type DialSettings struct {
Endpoint string
DefaultEndpoint string
+ DefaultEndpointTemplate string
DefaultMTLSEndpoint string
Scopes []string
DefaultScopes []string
@@ -55,6 +58,8 @@ type DialSettings struct {
EnableDirectPathXds bool
EnableNewAuthLibrary bool
AllowNonDefaultServiceAccount bool
+ UniverseDomain string
+ DefaultUniverseDomain string
// Google API system parameters. For more information please read:
// https://cloud.google.com/apis/docs/system-parameters
@@ -158,3 +163,66 @@ func (ds *DialSettings) Validate() error {
}
return nil
}
+
+// GetDefaultUniverseDomain returns the default service domain for a given Cloud
+// universe, as configured with internaloption.WithDefaultUniverseDomain.
+// The default value is "googleapis.com".
+func (ds *DialSettings) GetDefaultUniverseDomain() string {
+ if ds.DefaultUniverseDomain == "" {
+ return universeDomainDefault
+ }
+ return ds.DefaultUniverseDomain
+}
+
+// GetUniverseDomain returns the default service domain for a given Cloud
+// universe, as configured with option.WithUniverseDomain.
+// The default value is the value of GetDefaultUniverseDomain, as configured
+// with internaloption.WithDefaultUniverseDomain.
+func (ds *DialSettings) GetUniverseDomain() string {
+ if ds.UniverseDomain == "" {
+ return ds.GetDefaultUniverseDomain()
+ }
+ return ds.UniverseDomain
+}
+
+func (ds *DialSettings) IsUniverseDomainGDU() bool {
+ return ds.GetUniverseDomain() == ds.GetDefaultUniverseDomain()
+}
+
+// GetUniverseDomain returns the default service domain for a given Cloud
+// universe, from google.Credentials, for comparison with the value returned by
+// (*DialSettings).GetUniverseDomain. This wrapper function should be removed
+// to close [TODO(chrisdsmith): issue link here]. See details below.
+func GetUniverseDomain(creds *google.Credentials) (string, error) {
+ timer := time.NewTimer(time.Second)
+ defer timer.Stop()
+ errors := make(chan error)
+ results := make(chan string)
+
+ go func() {
+ result, err := creds.GetUniverseDomain()
+ if err != nil {
+ errors <- err
+ return
+ }
+ results <- result
+ }()
+
+ select {
+ case err := <-errors:
+ // An error that is returned before the timer expires is legitimate.
+ return "", err
+ case res := <-results:
+ return res, nil
+ case <-timer.C: // Timer is expired.
+ // If err or res was not returned, it means that creds.GetUniverseDomain()
+ // did not complete in 1s. Assume that MDS is likely never responding to
+ // the endpoint and will timeout. This is the source of issues such as
+ // https://github.com/googleapis/google-cloud-go/issues/9350.
+ // Temporarily (2024-02-02) return the GDU domain. Restore the original
+ // calls to creds.GetUniverseDomain() in grpc/dial.go and http/dial.go
+ // and remove this method to close
+ // https://github.com/googleapis/google-api-go-client/issues/2399.
+ return universeDomainDefault, nil
+ }
+}
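GetUniverseDomain above bounds a potentially hanging metadata lookup with a one-second timer and falls back to the GDU. A standalone sketch of that pattern (slowLookup is a hypothetical stand-in for creds.GetUniverseDomain; buffered channels are used here so the goroutine cannot leak after a timeout):

package main

import (
	"fmt"
	"time"
)

func lookupWithTimeout(slowLookup func() (string, error)) (string, error) {
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	results := make(chan string, 1)
	errs := make(chan error, 1)
	go func() {
		res, err := slowLookup()
		if err != nil {
			errs <- err
			return
		}
		results <- res
	}()
	select {
	case err := <-errs:
		return "", err
	case res := <-results:
		return res, nil
	case <-timer.C:
		// Assume the lookup will never answer; fall back to the default universe.
		return "googleapis.com", nil
	}
}

func main() {
	domain, _ := lookupWithTimeout(func() (string, error) {
		time.Sleep(2 * time.Second) // simulate an unresponsive metadata server
		return "example.edu", nil
	})
	fmt.Println(domain) // googleapis.com
}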
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index c95bd7144f0..60852aed648 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "0.152.0"
+const Version = "0.167.0"
diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go
index b2b249eec68..e6b5c102555 100644
--- a/vendor/google.golang.org/api/option/internaloption/internaloption.go
+++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go
@@ -22,10 +22,32 @@ func (o defaultEndpointOption) Apply(settings *internal.DialSettings) {
// It should only be used internally by generated clients.
//
// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint.
+//
+// Deprecated: WithDefaultEndpoint does not support setting the universe domain.
+// Use WithDefaultEndpointTemplate and WithDefaultUniverseDomain to compose the
+// default endpoint instead.
func WithDefaultEndpoint(url string) option.ClientOption {
return defaultEndpointOption(url)
}
+type defaultEndpointTemplateOption string
+
+func (o defaultEndpointTemplateOption) Apply(settings *internal.DialSettings) {
+ settings.DefaultEndpointTemplate = string(o)
+}
+
+// WithDefaultEndpointTemplate provides a template for creating the endpoint
+// using a universe domain. See also WithDefaultUniverseDomain and
+// option.WithUniverseDomain. The placeholder UNIVERSE_DOMAIN should be used
+// instead of a concrete universe domain such as "googleapis.com".
+//
+// Example: WithDefaultEndpointTemplate("https://logging.UNIVERSE_DOMAIN/")
+//
+// It should only be used internally by generated clients.
+func WithDefaultEndpointTemplate(url string) option.ClientOption {
+ return defaultEndpointTemplateOption(url)
+}
+
type defaultMTLSEndpointOption string
func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) {
@@ -126,8 +148,29 @@ func (w withDefaultScopes) Apply(o *internal.DialSettings) {
copy(o.DefaultScopes, w)
}
+// WithDefaultUniverseDomain returns a ClientOption that sets the default universe domain.
+//
+// It should only be used internally by generated clients.
+//
+// This is similar to the public WithUniverse, but allows us to determine whether the user has
+// overridden the default universe.
+func WithDefaultUniverseDomain(ud string) option.ClientOption {
+ return withDefaultUniverseDomain(ud)
+}
+
+type withDefaultUniverseDomain string
+
+func (w withDefaultUniverseDomain) Apply(o *internal.DialSettings) {
+ o.DefaultUniverseDomain = string(w)
+}
+
// EnableJwtWithScope returns a ClientOption that specifies if scope can be used
// with self-signed JWT.
+//
+// EnableJwtWithScope is ignored when option.WithUniverseDomain is set
+// to a value other than the Google Default Universe (GDU) of "googleapis.com".
+// For non-GDU domains, token exchange is impossible and services must
+// support self-signed JWTs with scopes.
func EnableJwtWithScope() option.ClientOption {
return enableJwtWithScope(true)
}
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
index b2085a1949a..c882c1eb482 100644
--- a/vendor/google.golang.org/api/option/option.go
+++ b/vendor/google.golang.org/api/option/option.go
@@ -343,3 +343,16 @@ func (w *withCreds) Apply(o *internal.DialSettings) {
func WithCredentials(creds *google.Credentials) ClientOption {
return (*withCreds)(creds)
}
+
+// WithUniverseDomain returns a ClientOption that sets the universe domain.
+//
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func WithUniverseDomain(ud string) ClientOption {
+ return withUniverseDomain(ud)
+}
+
+type withUniverseDomain string
+
+func (w withUniverseDomain) Apply(o *internal.DialSettings) {
+ o.UniverseDomain = string(w)
+}
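Taken together, the new options compose as follows; a minimal sketch of how a generated client might wire them up (the endpoint template and the overriding universe domain are hypothetical):

package main

import (
	"fmt"

	"google.golang.org/api/option"
	"google.golang.org/api/option/internaloption"
)

func exampleClientOptions() []option.ClientOption {
	return []option.ClientOption{
		// Set by the generated client.
		internaloption.WithDefaultEndpointTemplate("https://example.UNIVERSE_DOMAIN/"),
		internaloption.WithDefaultUniverseDomain("googleapis.com"),
		// Optionally overridden by the end user for a non-GDU universe.
		option.WithUniverseDomain("example-universe.example.com"),
	}
}

func main() {
	fmt.Println(len(exampleClientOptions()), "client options configured")
}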
diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go
index 87a22f75863..bfc55594efb 100644
--- a/vendor/google.golang.org/api/transport/grpc/dial.go
+++ b/vendor/google.golang.org/api/transport/grpc/dial.go
@@ -14,10 +14,12 @@ import (
"net"
"os"
"strings"
+ "sync"
"time"
"cloud.google.com/go/compute/metadata"
"go.opencensus.io/plugin/ocgrpc"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/oauth2"
"golang.org/x/time/rate"
"google.golang.org/api/internal"
@@ -26,6 +28,7 @@ import (
grpcgoogle "google.golang.org/grpc/credentials/google"
grpcinsecure "google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/credentials/oauth"
+ "google.golang.org/grpc/stats"
// Install grpclb, which is required for direct path.
_ "google.golang.org/grpc/balancer/grpclb"
@@ -43,6 +46,29 @@ var timeoutDialerOption grpc.DialOption
// Log rate limiter
var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second}
+// Assign to var for unit test replacement
+var dialContext = grpc.DialContext
+
+// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across
+// all dial connections to avoid the memory leak documented in
+// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226
+//
+// TODO: If 4226 has been fixed in opentelemetry-go-contrib, replace this
+// singleton with inline usage for simplicity.
+var (
+ initOtelStatsHandlerOnce sync.Once
+ otelStatsHandler stats.Handler
+)
+
+// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all
+// dial connections.
+func otelGRPCStatsHandler() stats.Handler {
+ initOtelStatsHandlerOnce.Do(func() {
+ otelStatsHandler = otelgrpc.NewClientHandler()
+ })
+ return otelStatsHandler
+}
+
// Dial returns a GRPC connection for use communicating with a Google cloud
// service, configured with the given ClientOptions.
func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
@@ -142,52 +168,67 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
// when dialing an insecure connection?
if !o.NoAuth && !insecure {
if o.APIKey != "" {
- log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.")
- }
- creds, err := internal.Creds(ctx, o)
- if err != nil {
- return nil, err
- }
-
- grpcOpts = append(grpcOpts,
- grpc.WithPerRPCCredentials(grpcTokenSource{
- TokenSource: oauth.TokenSource{creds.TokenSource},
- quotaProject: internal.GetQuotaProject(creds, o.QuotaProject),
+ grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcAPIKey{
+ apiKey: o.APIKey,
requestReason: o.RequestReason,
- }),
- )
-
- // Attempt Direct Path:
- logRateLimiter.Do(func() {
- logDirectPathMisconfig(endpoint, creds.TokenSource, o)
- })
- if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() {
- // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
- grpcOpts = []grpc.DialOption{
- grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))}
- if timeoutDialerOption != nil {
- grpcOpts = append(grpcOpts, timeoutDialerOption)
+ }))
+ } else {
+ creds, err := internal.Creds(ctx, o)
+ if err != nil {
+ return nil, err
}
- // Check if google-c2p resolver is enabled for DirectPath
- if isDirectPathXdsUsed(o) {
- // google-c2p resolver target must not have a port number
- if addr, _, err := net.SplitHostPort(endpoint); err == nil {
- endpoint = "google-c2p:///" + addr
- } else {
- endpoint = "google-c2p:///" + endpoint
+ if o.TokenSource == nil {
+ // We only validate non-tokensource creds, as TokenSource-based credentials
+ // don't propagate universe.
+ credsUniverseDomain, err := internal.GetUniverseDomain(creds)
+ if err != nil {
+ return nil, err
+ }
+ if o.GetUniverseDomain() != credsUniverseDomain {
+ return nil, internal.ErrUniverseNotMatch(o.GetUniverseDomain(), credsUniverseDomain)
+ }
+ }
+ grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{
+ TokenSource: oauth.TokenSource{TokenSource: creds.TokenSource},
+ quotaProject: internal.GetQuotaProject(creds, o.QuotaProject),
+ requestReason: o.RequestReason,
+ }))
+ // Attempt Direct Path:
+ logRateLimiter.Do(func() {
+ logDirectPathMisconfig(endpoint, creds.TokenSource, o)
+ })
+ if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() {
+ // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
+ grpcOpts = []grpc.DialOption{
+ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(
+ grpcgoogle.DefaultCredentialsOptions{
+ PerRPCCreds: oauth.TokenSource{TokenSource: creds.TokenSource},
+ })),
}
- } else {
- if !strings.HasPrefix(endpoint, "dns:///") {
- endpoint = "dns:///" + endpoint
+ if timeoutDialerOption != nil {
+ grpcOpts = append(grpcOpts, timeoutDialerOption)
+ }
+ // Check if google-c2p resolver is enabled for DirectPath
+ if isDirectPathXdsUsed(o) {
+ // google-c2p resolver target must not have a port number
+ if addr, _, err := net.SplitHostPort(endpoint); err == nil {
+ endpoint = "google-c2p:///" + addr
+ } else {
+ endpoint = "google-c2p:///" + endpoint
+ }
+ } else {
+ if !strings.HasPrefix(endpoint, "dns:///") {
+ endpoint = "dns:///" + endpoint
+ }
+ grpcOpts = append(grpcOpts,
+ // For now all DirectPath go clients will be using the following lb config, but in future
+ // when different services need different configs, then we should change this to a
+ // per-service config.
+ grpc.WithDisableServiceConfig(),
+ grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
}
- grpcOpts = append(grpcOpts,
- // For now all DirectPath go clients will be using the following lb config, but in future
- // when different services need different configs, then we should change this to a
- // per-service config.
- grpc.WithDisableServiceConfig(),
- grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
+ // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor.
}
- // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor.
}
}
@@ -195,12 +236,13 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
// gRPC stats handler.
// This assumes that gRPC options are processed in order, left to right.
grpcOpts = addOCStatsHandler(grpcOpts, o)
+ grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o)
grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
if o.UserAgent != "" {
grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
}
- return grpc.DialContext(ctx, endpoint, grpcOpts...)
+ return dialContext(ctx, endpoint, grpcOpts...)
}
func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption {
@@ -210,6 +252,13 @@ func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings)
return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
}
+func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption {
+ if settings.TelemetryDisabled {
+ return opts
+ }
+ return append(opts, grpc.WithStatsHandler(otelGRPCStatsHandler()))
+}
+
// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource.
type grpcTokenSource struct {
oauth.TokenSource
@@ -237,6 +286,31 @@ func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string)
return metadata, nil
}
+// grpcAPIKey supplies PerRPCCredentials from an API Key.
+type grpcAPIKey struct {
+ apiKey string
+
+ // Additional metadata attached as headers.
+ requestReason string
+}
+
+// GetRequestMetadata gets the request metadata as a map from a grpcAPIKey.
+func (ts grpcAPIKey) GetRequestMetadata(ctx context.Context, uri ...string) (
+ map[string]string, error) {
+ metadata := map[string]string{
+ "X-goog-api-key": ts.apiKey,
+ }
+ if ts.requestReason != "" {
+ metadata["X-goog-request-reason"] = ts.requestReason
+ }
+ return metadata, nil
+}
+
+// RequireTransportSecurity indicates whether the credentials requires transport security.
+func (ts grpcAPIKey) RequireTransportSecurity() bool {
+ return true
+}
+
func isDirectPathEnabled(endpoint string, o *internal.DialSettings) bool {
if !o.EnableDirectPath {
return false
diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
index a07362ffdbd..c4f5e0b1380 100644
--- a/vendor/google.golang.org/api/transport/http/dial.go
+++ b/vendor/google.golang.org/api/transport/http/dial.go
@@ -16,6 +16,7 @@ import (
"time"
"go.opencensus.io/plugin/ochttp"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/net/http2"
"golang.org/x/oauth2"
"google.golang.org/api/googleapi/transport"
@@ -69,6 +70,9 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna
requestReason: settings.RequestReason,
}
var trans http.RoundTripper = paramTransport
+ // Give OpenTelemetry precedence over OpenCensus in case user configuration
+ // causes both to write the same header (`X-Cloud-Trace-Context`).
+ trans = addOpenTelemetryTransport(trans, settings)
trans = addOCTransport(trans, settings)
switch {
case settings.NoAuth:
@@ -84,6 +88,17 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna
if err != nil {
return nil, err
}
+ if settings.TokenSource == nil {
+ // We only validate non-tokensource creds, as TokenSource-based credentials
+ // don't propagate universe.
+ credsUniverseDomain, err := internal.GetUniverseDomain(creds)
+ if err != nil {
+ return nil, err
+ }
+ if settings.GetUniverseDomain() != credsUniverseDomain {
+ return nil, internal.ErrUniverseNotMatch(settings.GetUniverseDomain(), credsUniverseDomain)
+ }
+ }
paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject)
ts := creds.TokenSource
if settings.ImpersonationConfig == nil && settings.TokenSource != nil {
@@ -203,6 +218,13 @@ func fallbackBaseTransport() *http.Transport {
}
}
+func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper {
+ if settings.TelemetryDisabled {
+ return trans
+ }
+ return otelhttp.NewTransport(trans)
+}
+
func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper {
if settings.TelemetryDisabled {
return trans
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
index 83774fbcbe7..d5dccb93377 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.12
+// protoc v4.24.4
// source: google/api/client.proto
package annotations
@@ -1033,6 +1033,18 @@ type MethodSettings struct {
// total_poll_timeout:
// seconds: 54000 # 90 minutes
LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"`
+ // List of top-level fields of the request message, that should be
+ // automatically populated by the client libraries based on their
+ // (google.api.field_info).format. Currently supported format: UUID4.
+ //
+ // Example of a YAML configuration:
+ //
+ // publishing:
+ // method_settings:
+ // - selector: google.example.v1.ExampleService.CreateExample
+ // auto_populated_fields:
+ // - request_id
+ AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"`
}
func (x *MethodSettings) Reset() {
@@ -1081,6 +1093,13 @@ func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning {
return nil
}
+func (x *MethodSettings) GetAutoPopulatedFields() []string {
+ if x != nil {
+ return x.AutoPopulatedFields
+ }
+ return nil
+}
+
// Describes settings to use when generating API methods that use the
// long-running operation pattern.
// All default values below are from those used in the client library
@@ -1452,69 +1471,73 @@ var file_google_api_client_proto_rawDesc = []byte{
0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65,
0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65,
0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e,
0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65,
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69,
- 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a,
- 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12,
- 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f,
- 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50,
- 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c,
- 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65,
- 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c,
- 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e,
- 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a,
- 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54,
- 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c,
- 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03,
- 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10,
- 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57,
- 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05,
- 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e,
- 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18,
- 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73,
- 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45,
- 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49,
- 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
- 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a,
- 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41,
- 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68,
- 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73,
- 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75,
- 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f,
- 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63,
- 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43,
- 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
- 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2,
- 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12,
+ 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65,
+ 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13,
+ 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e,
+ 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70,
+ 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15,
+ 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69,
+ 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c,
+ 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72,
+ 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c,
+ 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61,
+ 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50,
+ 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61,
+ 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45,
+ 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e,
+ 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01,
+ 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f,
+ 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f,
+ 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49,
+ 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a,
+ 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07,
+ 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
+ 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26,
+ 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44,
+ 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
+ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48,
+ 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f,
+ 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61,
+ 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42,
+ 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
+ 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
var (
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 5954801122a..e9e97d45111 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -430,7 +430,7 @@ type ClientHeader struct {
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
// A single process may be used to run multiple virtual
// servers with different identities.
- // The authority is the name of such a server identitiy.
+ // The authority is the name of such a server identity.
// It is typically a portion of the URI in the form of
// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index e6f2625b684..f6e815e6bfc 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -1860,27 +1860,15 @@ func (cc *ClientConn) determineAuthority() error {
}
endpoint := cc.parsedTarget.Endpoint()
- target := cc.target
- switch {
- case authorityFromDialOption != "":
+ if authorityFromDialOption != "" {
cc.authority = authorityFromDialOption
- case authorityFromCreds != "":
+ } else if authorityFromCreds != "" {
cc.authority = authorityFromCreds
- case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
- // TODO: remove when the unix resolver implements optional interface to
- // return channel authority.
- cc.authority = "localhost"
- case strings.HasPrefix(endpoint, ":"):
+ } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+ cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+ } else if strings.HasPrefix(endpoint, ":") {
cc.authority = "localhost" + endpoint
- default:
- // TODO: Define an optional interface on the resolver builder to return
- // the channel authority given the user's dial target. For resolvers
- // which don't implement this interface, we will use the endpoint from
- // "scheme://authority/endpoint" as the default authority.
- // Escape the endpoint to handle use cases where the endpoint
- // might not be a valid authority by default.
- // For example an endpoint which has multiple paths like
- // 'a/b/c', which is not a valid authority by default.
+ } else {
cc.authority = encodeAuthority(endpoint)
}
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
index 0854e7af651..6c867dd8501 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
@@ -25,6 +25,7 @@ import (
"fmt"
"io"
"net"
+ "time"
"golang.org/x/sync/semaphore"
grpc "google.golang.org/grpc"
@@ -60,8 +61,6 @@ var (
// control number of concurrent created (but not closed) handshakes.
clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes))
serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes))
- // errDropped occurs when maxPendingHandshakes is reached.
- errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached")
// errOutOfBound occurs when the handshake service returns a consumed
// bytes value larger than the buffer that was passed to it originally.
errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound")
@@ -155,8 +154,8 @@ func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn,
// ClientHandshake starts and completes a client ALTS handshake for GCP. Once
// done, ClientHandshake returns a secure connection.
func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
- if !clientHandshakes.TryAcquire(1) {
- return nil, nil, errDropped
+ if err := clientHandshakes.Acquire(ctx, 1); err != nil {
+ return nil, nil, err
}
defer clientHandshakes.Release(1)
@@ -208,8 +207,8 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent
// ServerHandshake starts and completes a server ALTS handshake for GCP. Once
// done, ServerHandshake returns a secure connection.
func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
- if !serverHandshakes.TryAcquire(1) {
- return nil, nil, errDropped
+ if err := serverHandshakes.Acquire(ctx, 1); err != nil {
+ return nil, nil, err
}
defer serverHandshakes.Release(1)
@@ -308,8 +307,10 @@ func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*al
// the results. Handshaker service takes care of frame parsing, so we read
// whatever received from the network and send it to the handshaker service.
func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) {
+ var lastWriteTime time.Time
for {
if len(resp.OutFrames) > 0 {
+ lastWriteTime = time.Now()
if _, err := h.conn.Write(resp.OutFrames); err != nil {
return nil, nil, err
}
@@ -333,11 +334,15 @@ func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []b
// Append extra bytes from the previous interaction with the
// handshaker service with the current buffer read from conn.
p := append(extra, buf[:n]...)
+ // Compute the time elapsed since the last write to the peer.
+ timeElapsed := time.Since(lastWriteTime)
+ timeElapsedMs := uint32(timeElapsed.Milliseconds())
// From here on, p and extra point to the same slice.
resp, err = h.accessHandshakerService(&altspb.HandshakerReq{
ReqOneof: &altspb.HandshakerReq_Next{
Next: &altspb.NextHandshakeMessageReq{
- InBytes: p,
+ InBytes: p,
+ NetworkLatencyMs: timeElapsedMs,
},
},
})
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
index 81d0f114084..00407de7559 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
@@ -613,6 +613,10 @@ type NextHandshakeMessageReq struct {
// that the peer's out_frames are split into multiple NextHandshakerMessageReq
// messages.
InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
+ // Number of milliseconds between when the application sent the last handshake
+ // message to the peer and when the application received the current handshake
+ // message (in the in_bytes field) from the peer.
+ NetworkLatencyMs uint32 `protobuf:"varint,2,opt,name=network_latency_ms,json=networkLatencyMs,proto3" json:"network_latency_ms,omitempty"`
}
func (x *NextHandshakeMessageReq) Reset() {
@@ -654,6 +658,13 @@ func (x *NextHandshakeMessageReq) GetInBytes() []byte {
return nil
}
+func (x *NextHandshakeMessageReq) GetNetworkLatencyMs() uint32 {
+ if x != nil {
+ return x.NetworkLatencyMs
+ }
+ return 0
+}
+
type HandshakerReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1116,89 +1127,92 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63,
0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61,
0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65,
0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a,
- 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46,
- 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
- 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73,
- 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
- 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
- 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72,
- 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48,
- 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37,
- 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
- 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64,
- 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48,
- 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f,
- 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
- 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70,
- 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
- 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f,
- 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74,
- 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61,
- 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
- 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65,
- 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61,
- 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e,
- 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72,
- 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
- 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72,
- 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d,
- 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a,
- 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74,
- 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61,
- 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
- 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72,
- 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46,
- 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63,
- 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06,
- 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
- 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
- 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64,
- 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
- 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e,
- 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f,
- 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a,
- 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02,
- 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50,
- 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
- 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07,
- 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73,
- 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b,
- 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72,
- 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65,
- 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
- 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00,
- 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
- 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48,
- 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
- 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
- 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
- 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63,
- 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12,
+ 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f,
+ 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72,
+ 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48,
+ 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74,
+ 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
+ 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
+ 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73,
+ 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52,
+ 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04,
+ 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68,
+ 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52,
+ 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65,
+ 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65,
+ 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65,
+ 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37,
+ 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70,
+ 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b,
+ 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49,
+ 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70,
+ 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78,
+ 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22,
+ 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69,
+ 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
+ 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72,
+ 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d,
+ 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61,
+ 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65,
+ 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32,
+ 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68,
+ 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53,
+ 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e,
+ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54,
+ 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45,
+ 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f,
+ 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03,
+ 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
+ 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f,
+ 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52,
+ 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61,
+ 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01,
+ 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c,
+ 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e,
+ 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
+ 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
+ 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 2549fe8e3b8..6c7ea6a5336 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -57,7 +57,7 @@ var (
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
// stored in the passed in attributes. This is set by
// credentials/xds/xds.go.
- GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo
+ GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
// GetServerCredentials returns the transport credentials configured on a
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
@@ -68,11 +68,6 @@ var (
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
CanonicalString any // func (codes.Code) string
- // DrainServerTransports initiates a graceful close of existing connections
- // on a gRPC server accepted on the provided listener address. An
- // xDS-enabled server invokes this method on a grpc.Server when a particular
- // listener moves to "not-serving" mode.
- DrainServerTransports any // func(*grpc.Server, string)
// IsRegisteredMethod returns whether the passed in method is registered as
// a method on the server.
IsRegisteredMethod any // func(*grpc.Server, string) bool
@@ -188,6 +183,19 @@ var (
ExitIdleModeForTesting any // func(*grpc.ClientConn) error
ChannelzTurnOffForTesting func()
+
+ // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found
+ // error for a given resource type and name. This is usually triggered when
+ // the associated watch timer fires. For testing purposes, having this
+ // function makes events more predictable than relying on timer events.
+ TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error
+
+ // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton
+ // to invoke resource not found for a resource type name and resource name.
+ TriggerXDSResourceNameNotFoundClient any // func(string, string) error
+
+ // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD.
+ FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
index 16091168773..27cd81af9e5 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -61,6 +61,10 @@ func (b *builder) Scheme() string {
return b.scheme
}
+func (b *builder) OverrideAuthority(resolver.Target) string {
+ return "localhost"
+}
+
type nopResolver struct {
}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
similarity index 96%
rename from vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go
rename to vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
index aeffd3e1c7b..4f347edd423 100644
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
@@ -1,4 +1,4 @@
-//go:build !unix
+//go:build !unix && !windows
/*
* Copyright 2023 gRPC authors.
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 00000000000..fd7d43a8907
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{
+ // Setting a negative value here prevents the Go stdlib from overriding
+ // the values of TCP keepalive time and interval. It also prevents the
+ // Go stdlib from enabling TCP keepalives by default.
+ KeepAlive: time.Duration(-1),
+ // This method is called after the underlying network socket is created,
+ // but before dialing the socket (or calling its connect() method). The
+ // combination of unconditionally enabling TCP keepalives here, and
+ // disabling the overriding of TCP keepalive parameters by setting the
+ // KeepAlive field to a negative value above, results in OS defaults for
+ // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index b330ccedc8a..83c3829826a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -535,8 +535,8 @@ const minBatchSize = 1000
// size is too low to give stream goroutines a chance to fill it up.
//
// Upon exiting, if the error causing the exit is not an I/O error, run()
-// flushes and closes the underlying connection. Otherwise, the connection is
-// left open to allow the I/O error to be encountered by the reader instead.
+// flushes the underlying connection. The connection is always left open to
+// allow different closing behavior on the client and server.
func (l *loopyWriter) run() (err error) {
defer func() {
if l.logger.V(logLevel) {
@@ -544,7 +544,6 @@ func (l *loopyWriter) run() (err error) {
}
if !isIOError(err) {
l.framer.writer.Flush()
- l.conn.Close()
}
l.cbuf.finish()
}()
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 59f67655a85..eff8799640c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -59,6 +59,8 @@ import (
// atomically.
var clientConnectionCounter uint64
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
@@ -449,7 +451,13 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
go func() {
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
- t.loopy.run()
+ if err := t.loopy.run(); !isIOError(err) {
+ // Immediately close the connection, as the loopy writer returns
+ // when there are no more active streams and we were draining (the
+ // server sent a GOAWAY). For I/O errors, the reader will hit it
+ // after draining any remaining incoming data.
+ t.conn.Close()
+ }
close(t.writerDone)
}()
return t, nil
@@ -568,7 +576,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
}
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
var k string
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
@@ -1323,10 +1331,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server.
- if streamID > id && streamID <= upperLimit {
- atomic.StoreUint32(&stream.unprocessed, 1)
- streamsToClose = append(streamsToClose, stream)
- }
+ atomic.StoreUint32(&stream.unprocessed, 1)
+ streamsToClose = append(streamsToClose, stream)
}
}
t.mu.Unlock()
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 680c9eba0b1..a206e2eef7e 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -322,8 +322,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
go func() {
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
- t.loopy.run()
+ err := t.loopy.run()
close(t.loopyWriterDone)
+ if !isIOError(err) {
+ // Close the connection if a non-I/O error occurs (for I/O errors
+ // the reader will also encounter the error and close). Wait 1
+ // second before closing the connection, or when the reader is done
+ // (i.e. the client already closed the connection or a connection
+ // error occurred). This avoids the potential problem where there
+ // is unread data on the receive side of the connection, which, if
+ // closed, would lead to a TCP RST instead of FIN, and the client
+ // encountering errors. For more info:
+ // https://github.com/grpc/grpc-go/issues/5358
+ select {
+ case <-t.readerDone:
+ case <-time.After(time.Second):
+ }
+ t.conn.Close()
+ }
}()
go t.keepalive()
return t, nil
@@ -609,8 +625,8 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
// traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
defer func() {
- <-t.loopyWriterDone
close(t.readerDone)
+ <-t.loopyWriterDone
}()
for {
t.controlBuf.throttle()
@@ -960,7 +976,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
}
}
if err := t.writeHeaderLocked(s); err != nil {
- return status.Convert(err).Err()
+ switch e := err.(type) {
+ case ConnectionError:
+ return status.Error(codes.Unavailable, e.Desc)
+ default:
+ return status.Convert(err).Err()
+ }
}
return nil
}
@@ -1324,6 +1345,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
+ t.framer.writer.Flush()
if retErr != nil {
return false, retErr
}
@@ -1344,7 +1366,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
return false, err
}
go func() {
- timer := time.NewTimer(time.Minute)
+ timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case <-t.drainEvent.Done():
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 49446825763..1e9485fd6e2 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -25,8 +25,14 @@ import (
"context"
"fmt"
"strings"
+
+ "google.golang.org/grpc/internal"
)
+func init() {
+ internal.FromOutgoingContextRaw = fromOutgoingContextRaw
+}
+
// DecodeKeyValue returns k, v, nil.
//
// Deprecated: use k and v directly instead.
@@ -238,16 +244,13 @@ func copyOf(v []string) []string {
return vals
}
-// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
//
// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
// is a map, there's no guarantee it's created using our helper functions) and
// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
// lowercase).
-//
-// This is intended for gRPC-internal use ONLY. Users should use
-// FromOutgoingContext instead.
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
if !ok {
return nil, nil, false
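FromOutgoingContextRaw is no longer exported after this bump, so any caller outside grpc itself has to read outgoing metadata through the public accessor. A minimal sketch of the supported pattern (the header key and value are placeholders):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Attach outgoing metadata the supported way.
	ctx := metadata.AppendToOutgoingContext(context.Background(), "x-request-id", "123")

	// FromOutgoingContextRaw is internal-only after this upgrade; callers
	// should use FromOutgoingContext, which merges appended pairs and
	// lower-cases keys before returning them.
	md, ok := metadata.FromOutgoingContext(ctx)
	fmt.Println(ok, md.Get("x-request-id"))
}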
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index bd1c7d01b7e..adf89dd9cfe 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -314,3 +314,13 @@ type Resolver interface {
// Close closes the resolver.
Close()
}
+
+// AuthorityOverrider is implemented by Builders that wish to override the
+// default authority for the ClientConn.
+// By default, the authority used is target.Endpoint().
+type AuthorityOverrider interface {
+ // OverrideAuthority returns the authority to use for a ClientConn with the
+ // given target. The implementation must generate it without blocking,
+ // typically in line, and must keep it unchanged.
+ OverrideAuthority(Target) string
+}
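The new AuthorityOverrider hook is what the unix resolver and the simplified determineAuthority logic above rely on. An illustrative sketch of a custom builder opting in — the scheme, type names, and returned authority are invented for the example:

package example

import "google.golang.org/grpc/resolver"

// exampleBuilder is a hypothetical resolver builder that pins the channel
// authority instead of letting gRPC derive it from target.Endpoint().
type exampleBuilder struct{}

func (exampleBuilder) Scheme() string { return "example" }

func (exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	// A real builder would start name resolution here and push updates to cc.
	return nopResolver{}, nil
}

// OverrideAuthority implements resolver.AuthorityOverrider; determineAuthority
// now consults it before falling back to the endpoint, which is how the unix
// resolver sets "localhost" above.
func (exampleBuilder) OverrideAuthority(resolver.Target) string {
	return "fixed-authority.example.com"
}

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func init() { resolver.Register(exampleBuilder{}) }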
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index b7723aa09cb..a4b6bc6873c 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -640,14 +640,18 @@ func encode(c baseCodec, msg any) ([]byte, error) {
return b, nil
}
-// compress returns the input bytes compressed by compressor or cp. If both
-// compressors are nil, returns nil.
+// compress returns the input bytes compressed by compressor or cp.
+// If both compressors are nil, or if the message has zero length, returns nil,
+// indicating no compression was done.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
if compressor == nil && cp == nil {
return nil, nil
}
+ if len(in) == 0 {
+ return nil, nil
+ }
wrapErr := func(err error) error {
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 682fa1831ec..e89c5ac6136 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -74,9 +74,6 @@ func init() {
return srv.isRegisteredMethod(method)
}
internal.ServerFromContext = serverFromContext
- internal.DrainServerTransports = func(srv *Server, addr string) {
- srv.drainServerTransports(addr)
- }
internal.AddGlobalServerOptions = func(opt ...ServerOption) {
globalServerOptions = append(globalServerOptions, opt...)
}
@@ -139,7 +136,8 @@ type Server struct {
quit *grpcsync.Event
done *grpcsync.Event
channelzRemoveOnce sync.Once
- serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
+ serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
+ handlersWG sync.WaitGroup // counts active method handler goroutines
channelzID *channelz.Identifier
czData *channelzData
@@ -176,6 +174,7 @@ type serverOptions struct {
headerTableSize *uint32
numServerWorkers uint32
recvBufferPool SharedBufferPool
+ waitForHandlers bool
}
var defaultServerOptions = serverOptions{
@@ -573,6 +572,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
})
}
+// WaitForHandlers causes Stop to wait until all outstanding method handlers have
+// exited before returning. If false, Stop will return as soon as all
+// connections have closed, but method handlers may still be running. By
+// default, Stop does not wait for method handlers to return.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WaitForHandlers(w bool) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.waitForHandlers = w
+ })
+}
+
// RecvBufferPool returns a ServerOption that configures the server
// to use the provided shared buffer pool for parsing incoming messages. Depending
// on the application's workload, this could result in reduced memory allocation.
@@ -932,6 +946,12 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
return
}
+ if cc, ok := rawConn.(interface {
+ PassServerTransport(transport.ServerTransport)
+ }); ok {
+ cc.PassServerTransport(st)
+ }
+
if !s.addConn(lisAddr, st) {
return
}
@@ -941,15 +961,6 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
}()
}
-func (s *Server) drainServerTransports(addr string) {
- s.mu.Lock()
- conns := s.conns[addr]
- for st := range conns {
- st.Drain("")
- }
- s.mu.Unlock()
-}
-
// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
@@ -1010,9 +1021,11 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport,
streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
st.HandleStreams(ctx, func(stream *transport.Stream) {
+ s.handlersWG.Add(1)
streamQuota.acquire()
f := func() {
defer streamQuota.release()
+ defer s.handlersWG.Done()
s.handleStream(st, stream)
}
@@ -1911,6 +1924,10 @@ func (s *Server) stop(graceful bool) {
s.serverWorkerChannelClose()
}
+ if graceful || s.opts.waitForHandlers {
+ s.handlersWG.Wait()
+ }
+
if s.events != nil {
s.events.Finish()
s.events = nil
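WaitForHandlers is an experimental option; a rough usage sketch, with a placeholder address and no services registered:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}

	// With WaitForHandlers(true), Stop blocks until every in-flight method
	// handler has returned, instead of returning once connections are closed.
	srv := grpc.NewServer(grpc.WaitForHandlers(true))
	// ... register services on srv here ...

	go func() {
		if err := srv.Serve(lis); err != nil {
			log.Printf("serve exited: %v", err)
		}
	}()

	// On shutdown, Stop now also waits for outstanding handlers.
	srv.Stop()
}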
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index b14b2fbea2e..d621f52b1ab 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -48,6 +48,8 @@ import (
"google.golang.org/grpc/status"
)
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
// StreamHandler defines the handler called by gRPC server to complete the
// execution of a streaming RPC.
//
@@ -184,7 +186,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// when the RPC completes.
opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
// validate md
if err := imetadata.Validate(md); err != nil {
return nil, status.Error(codes.Internal, err.Error())
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index dc2cea59c93..f1aec4c0ade 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.60.1"
+const Version = "1.61.1"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index 896dc38f506..5da38a40996 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -88,7 +88,7 @@ not git grep -l 'x/net/context' -- "*.go"
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test'
# - Do not use "interface{}"; use "any" instead.
-git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc'
+git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate'
# - Do not call grpclog directly. Use grpclog.Component instead.
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
@@ -127,7 +127,7 @@ staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true
grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)"
# Exclude underscore checks for generated code.
-grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)'
+grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)'
# Error for duplicate imports not including grpc protos.
grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
@@ -152,6 +152,7 @@ grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
XXXXX Protobuf related deprecation errors:
"github.com/golang/protobuf
.pb.go:
+grpc_testing_not_regenerate
: ptypes.
proto.RegisterType
XXXXX gRPC internal usage deprecation errors:
@@ -184,9 +185,6 @@ GetSafeRegexMatch
GetSuffixMatch
GetTlsCertificateCertificateProviderInstance
GetValidationContextCertificateProviderInstance
-XXXXX TODO: Remove the below deprecation usages:
-CloseNotifier
-Roots.Subjects
XXXXX PleaseIgnoreUnused'
echo SUCCESS
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index 5f28148d805..f47902371a6 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -11,6 +11,7 @@ import (
"strconv"
"strings"
+ "google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/json"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
@@ -23,7 +24,7 @@ import (
"google.golang.org/protobuf/reflect/protoregistry"
)
-// Unmarshal reads the given []byte into the given proto.Message.
+// Unmarshal reads the given []byte into the given [proto.Message].
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
@@ -37,7 +38,7 @@ type UnmarshalOptions struct {
// required fields will not return an error.
AllowPartial bool
- // If DiscardUnknown is set, unknown fields are ignored.
+ // If DiscardUnknown is set, unknown fields and enum name values are ignored.
DiscardUnknown bool
// Resolver is used for looking up types when unmarshaling
@@ -47,9 +48,13 @@ type UnmarshalOptions struct {
protoregistry.MessageTypeResolver
protoregistry.ExtensionTypeResolver
}
+
+ // RecursionLimit limits how deeply messages may be nested.
+ // If zero, a default limit is applied.
+ RecursionLimit int
}
-// Unmarshal reads the given []byte and populates the given proto.Message
+// Unmarshal reads the given []byte and populates the given [proto.Message]
// using options in the UnmarshalOptions object.
// It will clear the message first before setting the fields.
// If it returns an error, the given message may be partially set.
@@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
+ if o.RecursionLimit == 0 {
+ o.RecursionLimit = protowire.DefaultRecursionLimit
+ }
dec := decoder{json.NewDecoder(b), o}
if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
@@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
// unmarshalMessage unmarshals a message into the given protoreflect.Message.
func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
+ d.opts.RecursionLimit--
+ if d.opts.RecursionLimit < 0 {
+ return errors.New("exceeded max recursion depth")
+ }
if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
return unmarshal(d, m)
}
@@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field
if err != nil {
return err
}
- m.Set(fd, val)
+ if val.IsValid() {
+ m.Set(fd, val)
+ }
return nil
}
@@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
}
case protoreflect.EnumKind:
- if v, ok := unmarshalEnum(tok, fd); ok {
+ if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok {
return v, nil
}
@@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
return protoreflect.ValueOfBytes(b), true
}
-func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
+func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.String:
// Lookup EnumNumber based on name.
@@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec
if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
return protoreflect.ValueOfEnum(enumVal.Number()), true
}
+ if discardUnknown {
+ return protoreflect.Value{}, true
+ }
case json.Number:
if n, ok := tok.Int(32); ok {
@@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc
if err != nil {
return err
}
- list.Append(val)
+ if val.IsValid() {
+ list.Append(val)
+ }
}
}
@@ -609,8 +628,9 @@ Loop:
if err != nil {
return err
}
-
- mmap.Set(pkey, pval)
+ if pval.IsValid() {
+ mmap.Set(pkey, pval)
+ }
}
return nil
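For the decoder changes, a small sketch with assumed values: RecursionLimit caps message nesting (zero means the protowire default) and DiscardUnknown now also skips unknown enum name strings. A well-known type is used so no generated code is required:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	opts := protojson.UnmarshalOptions{
		DiscardUnknown: true, // also ignores unknown enum name strings now
		RecursionLimit: 50,   // 0 falls back to protowire.DefaultRecursionLimit
	}

	var s structpb.Struct
	if err := opts.Unmarshal([]byte(`{"a": {"b": [1, 2, 3]}}`), &s); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(s.Fields["a"])
}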
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
index 21d5d2cb18e..ae71007c18b 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
@@ -6,6 +6,6 @@
// format. It follows the guide at
// https://protobuf.dev/programming-guides/proto3#json.
//
-// This package produces a different output than the standard "encoding/json"
+// This package produces a different output than the standard [encoding/json]
// package, which does not operate correctly on protocol buffer messages.
package protojson
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 66b95870e97..3f75098b6fb 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -31,7 +31,7 @@ func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
-// Marshal writes the given proto.Message in JSON format using default options.
+// Marshal writes the given [proto.Message] in JSON format using default options.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
@@ -81,6 +81,25 @@ type MarshalOptions struct {
// ╚═══════╧════════════════════════════╝
EmitUnpopulated bool
+ // EmitDefaultValues specifies whether to emit default-valued primitive fields,
+ // empty lists, and empty maps. The fields affected are as follows:
+ // ╔═══════╤════════════════════════════════════════╗
+ // ║ JSON  │ Protobuf field                         ║
+ // ╠═══════╪════════════════════════════════════════╣
+ // ║ false │ non-optional scalar boolean fields     ║
+ // ║ 0     │ non-optional scalar numeric fields     ║
+ // ║ ""    │ non-optional scalar string/byte fields ║
+ // ║ []    │ empty repeated fields                  ║
+ // ║ {}    │ empty map fields                       ║
+ // ╚═══════╧════════════════════════════════════════╝
+ //
+ // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields,
+ // i.e. presence-sensing fields that are omitted will remain omitted to preserve
+ // presence-sensing.
+ // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates
+ // a strict superset of the latter.
+ EmitDefaultValues bool
+
// Resolver is used for looking up types when expanding google.protobuf.Any
// messages. If nil, this defaults to using protoregistry.GlobalTypes.
Resolver interface {
@@ -102,7 +121,7 @@ func (o MarshalOptions) Format(m proto.Message) string {
return string(b)
}
-// Marshal marshals the given proto.Message in the JSON format using options in
+// Marshal marshals the given [proto.Message] in the JSON format using options in
// MarshalOptions. Do not depend on the output being stable. It may change over
// time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
@@ -178,7 +197,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl
// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
// method to additionally iterate over unpopulated fields.
-type unpopulatedFieldRanger struct{ protoreflect.Message }
+type unpopulatedFieldRanger struct {
+ protoreflect.Message
+
+ skipNull bool
+}
func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
fds := m.Descriptor().Fields()
@@ -192,6 +215,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
if isProto2Scalar || isSingularMessage {
+ if m.skipNull {
+ continue
+ }
v = protoreflect.Value{} // use invalid value to emit null
}
if !f(fd, v) {
@@ -217,8 +243,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
defer e.EndObject()
var fields order.FieldRanger = m
- if e.opts.EmitUnpopulated {
- fields = unpopulatedFieldRanger{m}
+ switch {
+ case e.opts.EmitUnpopulated:
+ fields = unpopulatedFieldRanger{Message: m, skipNull: false}
+ case e.opts.EmitDefaultValues:
+ fields = unpopulatedFieldRanger{Message: m, skipNull: true}
}
if typeURL != "" {
fields = typeURLFieldRanger{fields, typeURL}
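On the encoder side, a hedged sketch of the new EmitDefaultValues option; the helper name is arbitrary and any generated message could be passed in:

package example

import (
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
)

// marshalWithDefaults renders msg with zero-valued scalars, empty lists, and
// empty maps included. Unlike EmitUnpopulated, unset message and optional
// fields stay omitted, so field presence remains visible in the output.
func marshalWithDefaults(msg proto.Message) ([]byte, error) {
	return protojson.MarshalOptions{
		Multiline:         true,
		EmitDefaultValues: true,
	}.Marshal(msg)
}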
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 6c37d417449..25329b7692e 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error {
// Use another decoder to parse the unread bytes for @type field. This
// avoids advancing a read from current decoder because the current JSON
// object may contain the fields of the embedded type.
- dec := decoder{d.Clone(), UnmarshalOptions{}}
+ dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}}
tok, err := findTypeURL(dec)
switch err {
case errEmptyObject:
@@ -308,48 +308,25 @@ Loop:
// array) in order to advance the read to the next JSON value. It relies on
// the decoder returning an error if the types are not in valid sequence.
func (d decoder) skipJSONValue() error {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- // Only need to continue reading for objects and arrays.
- switch tok.Kind() {
- case json.ObjectOpen:
- for {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- switch tok.Kind() {
- case json.ObjectClose:
- return nil
- case json.Name:
- // Skip object field value.
- if err := d.skipJSONValue(); err != nil {
- return err
- }
- }
+ var open int
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
}
-
- case json.ArrayOpen:
- for {
- tok, err := d.Peek()
- if err != nil {
- return err
- }
- switch tok.Kind() {
- case json.ArrayClose:
- d.Read()
- return nil
- default:
- // Skip array item.
- if err := d.skipJSONValue(); err != nil {
- return err
- }
+ switch tok.Kind() {
+ case json.ObjectClose, json.ArrayClose:
+ open--
+ case json.ObjectOpen, json.ArrayOpen:
+ open++
+ if open > d.opts.RecursionLimit {
+ return errors.New("exceeded max recursion depth")
}
}
+ if open == 0 {
+ return nil
+ }
}
- return nil
}
// unmarshalAnyValue unmarshals the given custom-type message from the JSON
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index 4921b2d4a76..a45f112bce3 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -21,7 +21,7 @@ import (
"google.golang.org/protobuf/reflect/protoregistry"
)
-// Unmarshal reads the given []byte into the given proto.Message.
+// Unmarshal reads the given []byte into the given [proto.Message].
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
@@ -51,7 +51,7 @@ type UnmarshalOptions struct {
}
}
-// Unmarshal reads the given []byte and populates the given proto.Message
+// Unmarshal reads the given []byte and populates the given [proto.Message]
// using options in the UnmarshalOptions object.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
@@ -739,7 +739,9 @@ func (d decoder) skipValue() error {
case text.ListClose:
return nil
case text.MessageOpen:
- return d.skipMessageValue()
+ if err := d.skipMessageValue(); err != nil {
+ return err
+ }
default:
// Skip items. This will not validate whether skipped values are
// of the same type or not, same behavior as C++
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index 722a7b41df3..95967e8112a 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -33,7 +33,7 @@ func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
-// Marshal writes the given proto.Message in textproto format using default
+// Marshal writes the given [proto.Message] in textproto format using default
// options. Do not depend on the output being stable. It may change over time
// across different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
@@ -97,7 +97,7 @@ func (o MarshalOptions) Format(m proto.Message) string {
return string(b)
}
-// Marshal writes the given proto.Message in textproto format using options in
+// Marshal writes the given [proto.Message] in textproto format using options in
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index f4b4686cf9d..e942bc983ee 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -6,7 +6,7 @@
// See https://protobuf.dev/programming-guides/encoding.
//
// For marshaling and unmarshaling entire protobuf messages,
-// use the "google.golang.org/protobuf/proto" package instead.
+// use the [google.golang.org/protobuf/proto] package instead.
package protowire
import (
@@ -87,7 +87,7 @@ func ParseError(n int) error {
// ConsumeField parses an entire field record (both tag and value) and returns
// the field number, the wire type, and the total length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
//
// The total length includes the tag header and the end group marker (if the
// field is a group).
@@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) {
}
// ConsumeFieldValue parses a field value and returns its length.
-// This assumes that the field Number and wire Type have already been parsed.
-// This returns a negative length upon an error (see ParseError).
+// This assumes that the field [Number] and wire [Type] have already been parsed.
+// This returns a negative length upon an error (see [ParseError]).
//
// When parsing a group, the length includes the end group marker and
// the end group is verified to match the starting field number.
@@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte {
}
// ConsumeTag parses b as a varint-encoded tag, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
func ConsumeTag(b []byte) (Number, Type, int) {
v, n := ConsumeVarint(b)
if n < 0 {
@@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte {
}
// ConsumeVarint parses b as a varint-encoded uint64, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
func ConsumeVarint(b []byte) (v uint64, n int) {
var y uint64
if len(b) <= 0 {
@@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte {
}
// ConsumeFixed32 parses b as a little-endian uint32, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
func ConsumeFixed32(b []byte) (v uint32, n int) {
if len(b) < 4 {
return 0, errCodeTruncated
@@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte {
}
// ConsumeFixed64 parses b as a little-endian uint64, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
func ConsumeFixed64(b []byte) (v uint64, n int) {
if len(b) < 8 {
return 0, errCodeTruncated
@@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte {
}
// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
func ConsumeBytes(b []byte) (v []byte, n int) {
m, n := ConsumeVarint(b)
if n < 0 {
@@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte {
}
// ConsumeString parses b as a length-prefixed bytes value, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
func ConsumeString(b []byte) (v string, n int) {
bb, n := ConsumeBytes(b)
return string(bb), n
@@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte {
// ConsumeGroup parses b as a group value until the trailing end group marker,
// and verifies that the end marker matches the provided num. The value v
// does not contain the end marker, while the length does contain the end marker.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
func ConsumeGroup(num Number, b []byte) (v []byte, n int) {
n = ConsumeFieldValue(num, StartGroupType, b)
if n < 0 {
@@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int {
return n + SizeTag(num)
}
-// DecodeTag decodes the field Number and wire Type from its unified form.
-// The Number is -1 if the decoded field number overflows int32.
+// DecodeTag decodes the field [Number] and wire [Type] from its unified form.
+// The [Number] is -1 if the decoded field number overflows int32.
// Other than overflow, this does not check for field number validity.
func DecodeTag(x uint64) (Number, Type) {
// NOTE: MessageSet allows for larger field numbers than normal.
@@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) {
return Number(x >> 3), Type(x & 7)
}
-// EncodeTag encodes the field Number and wire Type into its unified form.
+// EncodeTag encodes the field [Number] and wire [Type] into its unified form.
func EncodeTag(num Number, typ Type) uint64 {
return uint64(num)<<3 | uint64(typ&7)
}
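Note: the square brackets added throughout these comments are Go doc links — since Go 1.19, [Name] or [pkg.Name] in a doc comment is rendered by pkg.go.dev and gopls as a link to that identifier. A minimal illustration in a hypothetical package, not part of the diff:

package demo

// Add appends v to s; see [Remove] for the inverse operation.
func Add(s []int, v int) []int { return append(s, v) }

// Remove drops the last element of s; see [Add].
func Remove(s []int) []int {
	if len(s) == 0 {
		return s
	}
	return s[:len(s)-1]
}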
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
index db5248e1b51..a45625c8d1f 100644
--- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
case protoreflect.FileImports:
for i := 0; i < vs.Len(); i++ {
var rs records
- rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
+ rv := reflect.ValueOf(vs.Get(i))
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("Path"), "Path"},
+ {rv.MethodByName("Package"), "Package"},
+ {rv.MethodByName("IsPublic"), "IsPublic"},
+ {rv.MethodByName("IsWeak"), "IsWeak"},
+ }...)
ss = append(ss, "{"+rs.Join()+"}")
}
return start + joinStrings(ss, allowMulti) + end
@@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
for i := 0; i < vs.Len(); i++ {
m := reflect.ValueOf(vs).MethodByName("Get")
v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
- ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue))
+ ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil))
}
return start + joinStrings(ss, allowMulti && isEnumValue) + end
}
}
-// descriptorAccessors is a list of accessors to print for each descriptor.
-//
-// Do not print all accessors since some contain redundant information,
-// while others are pointers that we do not want to follow since the descriptor
-// is actually a cyclic graph.
-//
-// Using a list allows us to print the accessors in a sensible order.
-var descriptorAccessors = map[reflect.Type][]string{
- reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
- reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
- reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
- reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
- reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
- reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"},
- reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"},
- reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
+type methodAndName struct {
+ method reflect.Value
+ name string
}
func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) {
- io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
+ io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil))
}
-func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
+
+func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
+ return formatDescOpt(t, isRoot, allowMulti, record)
+}
+
+func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
rv := reflect.ValueOf(t)
rt := rv.MethodByName("ProtoType").Type().In(0)
@@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
}
_, isFile := t.(protoreflect.FileDescriptor)
- rs := records{allowMulti: allowMulti}
+ rs := records{
+ allowMulti: allowMulti,
+ record: record,
+ }
if t.IsPlaceholder() {
if isFile {
- rs.Append(rv, "Path", "Package", "IsPlaceholder")
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("Path"), "Path"},
+ {rv.MethodByName("Package"), "Package"},
+ {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
+ }...)
} else {
- rs.Append(rv, "FullName", "IsPlaceholder")
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("FullName"), "FullName"},
+ {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
+ }...)
}
} else {
switch {
case isFile:
- rs.Append(rv, "Syntax")
+ rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"})
case isRoot:
- rs.Append(rv, "Syntax", "FullName")
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("Syntax"), "Syntax"},
+ {rv.MethodByName("FullName"), "FullName"},
+ }...)
default:
- rs.Append(rv, "Name")
+ rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"})
}
switch t := t.(type) {
case protoreflect.FieldDescriptor:
- for _, s := range descriptorAccessors[rt] {
- switch s {
+ accessors := []methodAndName{
+ {rv.MethodByName("Number"), "Number"},
+ {rv.MethodByName("Cardinality"), "Cardinality"},
+ {rv.MethodByName("Kind"), "Kind"},
+ {rv.MethodByName("HasJSONName"), "HasJSONName"},
+ {rv.MethodByName("JSONName"), "JSONName"},
+ {rv.MethodByName("HasPresence"), "HasPresence"},
+ {rv.MethodByName("IsExtension"), "IsExtension"},
+ {rv.MethodByName("IsPacked"), "IsPacked"},
+ {rv.MethodByName("IsWeak"), "IsWeak"},
+ {rv.MethodByName("IsList"), "IsList"},
+ {rv.MethodByName("IsMap"), "IsMap"},
+ {rv.MethodByName("MapKey"), "MapKey"},
+ {rv.MethodByName("MapValue"), "MapValue"},
+ {rv.MethodByName("HasDefault"), "HasDefault"},
+ {rv.MethodByName("Default"), "Default"},
+ {rv.MethodByName("ContainingOneof"), "ContainingOneof"},
+ {rv.MethodByName("ContainingMessage"), "ContainingMessage"},
+ {rv.MethodByName("Message"), "Message"},
+ {rv.MethodByName("Enum"), "Enum"},
+ }
+ for _, s := range accessors {
+ switch s.name {
case "MapKey":
if k := t.MapKey(); k != nil {
rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()})
@@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
if v := t.MapValue(); v != nil {
switch v.Kind() {
case protoreflect.EnumKind:
- rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
+ rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())})
case protoreflect.MessageKind, protoreflect.GroupKind:
- rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
+ rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())})
default:
- rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
+ rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()})
}
}
case "ContainingOneof":
if od := t.ContainingOneof(); od != nil {
- rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())})
+ rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())})
}
case "ContainingMessage":
if t.IsExtension() {
- rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())})
+ rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())})
}
case "Message":
if !t.IsMap() {
@@ -187,13 +219,61 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
ss = append(ss, string(fs.Get(i).Name()))
}
if len(ss) > 0 {
- rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
+ rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
}
- default:
- rs.Append(rv, descriptorAccessors[rt]...)
+
+ case protoreflect.FileDescriptor:
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("Path"), "Path"},
+ {rv.MethodByName("Package"), "Package"},
+ {rv.MethodByName("Imports"), "Imports"},
+ {rv.MethodByName("Messages"), "Messages"},
+ {rv.MethodByName("Enums"), "Enums"},
+ {rv.MethodByName("Extensions"), "Extensions"},
+ {rv.MethodByName("Services"), "Services"},
+ }...)
+
+ case protoreflect.MessageDescriptor:
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("IsMapEntry"), "IsMapEntry"},
+ {rv.MethodByName("Fields"), "Fields"},
+ {rv.MethodByName("Oneofs"), "Oneofs"},
+ {rv.MethodByName("ReservedNames"), "ReservedNames"},
+ {rv.MethodByName("ReservedRanges"), "ReservedRanges"},
+ {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"},
+ {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"},
+ {rv.MethodByName("Messages"), "Messages"},
+ {rv.MethodByName("Enums"), "Enums"},
+ {rv.MethodByName("Extensions"), "Extensions"},
+ }...)
+
+ case protoreflect.EnumDescriptor:
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("Values"), "Values"},
+ {rv.MethodByName("ReservedNames"), "ReservedNames"},
+ {rv.MethodByName("ReservedRanges"), "ReservedRanges"},
+ }...)
+
+ case protoreflect.EnumValueDescriptor:
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("Number"), "Number"},
+ }...)
+
+ case protoreflect.ServiceDescriptor:
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("Methods"), "Methods"},
+ }...)
+
+ case protoreflect.MethodDescriptor:
+ rs.Append(rv, []methodAndName{
+ {rv.MethodByName("Input"), "Input"},
+ {rv.MethodByName("Output"), "Output"},
+ {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"},
+ {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"},
+ }...)
}
- if rv.MethodByName("GoType").IsValid() {
- rs.Append(rv, "GoType")
+ if m := rv.MethodByName("GoType"); m.IsValid() {
+ rs.Append(rv, methodAndName{m, "GoType"})
}
}
return start + rs.Join() + end
@@ -202,19 +282,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
type records struct {
recs [][2]string
allowMulti bool
+
+ // record is a function that will be called for every Append() or
+ // AppendRecs() call, to be used for testing with the
+ // InternalFormatDescOptForTesting function.
+ record func(string)
}
-func (rs *records) Append(v reflect.Value, accessors ...string) {
+func (rs *records) AppendRecs(fieldName string, newRecs [2]string) {
+ if rs.record != nil {
+ rs.record(fieldName)
+ }
+ rs.recs = append(rs.recs, newRecs)
+}
+
+func (rs *records) Append(v reflect.Value, accessors ...methodAndName) {
for _, a := range accessors {
+ if rs.record != nil {
+ rs.record(a.name)
+ }
var rv reflect.Value
- if m := v.MethodByName(a); m.IsValid() {
- rv = m.Call(nil)[0]
+ if a.method.IsValid() {
+ rv = a.method.Call(nil)[0]
}
if v.Kind() == reflect.Struct && !rv.IsValid() {
- rv = v.FieldByName(a)
+ rv = v.FieldByName(a.name)
}
if !rv.IsValid() {
- panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
+ panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name))
}
if _, ok := rv.Interface().(protoreflect.Value); ok {
rv = rv.MethodByName("Interface").Call(nil)[0]
@@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) {
default:
s = fmt.Sprint(v)
}
- rs.recs = append(rs.recs, [2]string{a, s})
+ rs.recs = append(rs.recs, [2]string{a.name, s})
}
}
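Note: the descfmt refactor above drops the descriptorAccessors string map and instead resolves each accessor once, carrying the reflect.Value of the method together with its display name. A self-contained sketch of that pattern, with an invented thing type standing in for a descriptor:

package main

import (
	"fmt"
	"reflect"
)

type thing struct{ name string }

func (t thing) Name() string { return t.name }
func (t thing) Size() int    { return len(t.name) }

// methodAndName mirrors the refactor's idea: look the method up once and keep
// its label alongside it, rather than re-resolving it by string per record.
type methodAndName struct {
	method reflect.Value
	name   string
}

func main() {
	v := reflect.ValueOf(thing{name: "field"})
	accessors := []methodAndName{
		{v.MethodByName("Name"), "Name"},
		{v.MethodByName("Size"), "Size"},
	}
	for _, a := range accessors {
		if a.method.IsValid() {
			fmt.Printf("%s: %v\n", a.name, a.method.Call(nil)[0].Interface())
		}
	}
}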
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 7c3689baee8..193c68e8f91 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -21,11 +21,26 @@ import (
"google.golang.org/protobuf/reflect/protoregistry"
)
+// Edition is an Enum for proto2.Edition
+type Edition int32
+
+// These values align with the value of Enum in descriptor.proto which allows
+// direct conversion between the proto enum and this enum.
+const (
+ EditionUnknown Edition = 0
+ EditionProto2 Edition = 998
+ EditionProto3 Edition = 999
+ Edition2023 Edition = 1000
+ EditionUnsupported Edition = 100000
+)
+
// The types in this file may have a suffix:
// • L0: Contains fields common to all descriptors (except File) and
// must be initialized up front.
// • L1: Contains fields specific to a descriptor and
-// must be initialized up front.
+// must be initialized up front. If the associated proto uses Editions, the
+// Editions features must always be resolved. If not explicitly set, the
+// appropriate default must be resolved and set.
// • L2: Contains fields that are lazily initialized when constructing
// from the raw file descriptor. When constructing as a literal, the L2
// fields must be initialized up front.
@@ -44,6 +59,7 @@ type (
}
FileL1 struct {
Syntax protoreflect.Syntax
+ Edition Edition // Only used if Syntax == Editions
Path string
Package protoreflect.FullName
@@ -51,12 +67,35 @@ type (
Messages Messages
Extensions Extensions
Services Services
+
+ EditionFeatures FileEditionFeatures
}
FileL2 struct {
Options func() protoreflect.ProtoMessage
Imports FileImports
Locations SourceLocations
}
+
+ FileEditionFeatures struct {
+ // IsFieldPresence is true if field_presence is EXPLICIT
+ // https://protobuf.dev/editions/features/#field_presence
+ IsFieldPresence bool
+ // IsOpenEnum is true if enum_type is OPEN
+ // https://protobuf.dev/editions/features/#enum_type
+ IsOpenEnum bool
+ // IsPacked is true if repeated_field_encoding is PACKED
+ // https://protobuf.dev/editions/features/#repeated_field_encoding
+ IsPacked bool
+ // IsUTF8Validated is true if utf_validation is VERIFY
+ // https://protobuf.dev/editions/features/#utf8_validation
+ IsUTF8Validated bool
+ // IsDelimitedEncoded is true if message_encoding is DELIMITED
+ // https://protobuf.dev/editions/features/#message_encoding
+ IsDelimitedEncoded bool
+ // IsJSONCompliant is true if json_format is ALLOW
+ // https://protobuf.dev/editions/features/#json_format
+ IsJSONCompliant bool
+ }
)
func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
@@ -210,6 +249,9 @@ type (
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
Message protoreflect.MessageDescriptor
+
+ // Edition features.
+ Presence bool
}
Oneof struct {
@@ -273,6 +315,9 @@ func (fd *Field) HasJSONName() bool { return fd.L1.StringNam
func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) }
func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) }
func (fd *Field) HasPresence() bool {
+ if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
+ return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
+ }
return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
}
func (fd *Field) HasOptionalKeyword() bool {
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 136f1b21573..8f94230ea1c 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -12,6 +12,12 @@ import (
const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto"
+// Full and short names for google.protobuf.Edition.
+const (
+ Edition_enum_fullname = "google.protobuf.Edition"
+ Edition_enum_name = "Edition"
+)
+
// Names for google.protobuf.FileDescriptorSet.
const (
FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet"
@@ -81,7 +87,7 @@ const (
FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9
FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12
- FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13
+ FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14
)
// Names for google.protobuf.DescriptorProto.
@@ -184,10 +190,12 @@ const (
const (
ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration"
+ ExtensionRangeOptions_Features_field_name protoreflect.Name = "features"
ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification"
ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option"
ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration"
+ ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features"
ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification"
)
@@ -195,6 +203,7 @@ const (
const (
ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2
+ ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50
ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3
)
@@ -212,29 +221,26 @@ const (
// Field names for google.protobuf.ExtensionRangeOptions.Declaration.
const (
- ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number"
- ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name"
- ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type"
- ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated"
- ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved"
- ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated"
+ ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number"
+ ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name"
+ ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type"
+ ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved"
+ ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated"
- ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
- ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
- ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
- ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated"
- ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
- ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
+ ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
+ ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
+ ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
+ ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
+ ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
)
// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration.
const (
- ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1
- ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2
- ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3
- ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4
- ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5
- ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6
+ ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1
+ ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2
+ ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3
+ ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5
+ ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.FieldDescriptorProto.
@@ -478,6 +484,7 @@ const (
FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace"
FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace"
FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package"
+ FileOptions_Features_field_name protoreflect.Name = "features"
FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package"
@@ -500,6 +507,7 @@ const (
FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace"
FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace"
FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package"
+ FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features"
FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option"
)
@@ -525,6 +533,7 @@ const (
FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41
FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44
FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45
+ FileOptions_Features_field_number protoreflect.FieldNumber = 50
FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -547,6 +556,7 @@ const (
MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
+ MessageOptions_Features_field_name protoreflect.Name = "features"
MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
@@ -554,6 +564,7 @@ const (
MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts"
+ MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features"
MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
)
@@ -564,6 +575,7 @@ const (
MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11
+ MessageOptions_Features_field_number protoreflect.FieldNumber = 12
MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -584,8 +596,9 @@ const (
FieldOptions_Weak_field_name protoreflect.Name = "weak"
FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
FieldOptions_Retention_field_name protoreflect.Name = "retention"
- FieldOptions_Target_field_name protoreflect.Name = "target"
FieldOptions_Targets_field_name protoreflect.Name = "targets"
+ FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults"
+ FieldOptions_Features_field_name protoreflect.Name = "features"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@@ -597,8 +610,9 @@ const (
FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak"
FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact"
FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention"
- FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target"
FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
+ FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
+ FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@@ -613,8 +627,9 @@ const (
FieldOptions_Weak_field_number protoreflect.FieldNumber = 10
FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16
FieldOptions_Retention_field_number protoreflect.FieldNumber = 17
- FieldOptions_Target_field_number protoreflect.FieldNumber = 18
FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
+ FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20
+ FieldOptions_Features_field_number protoreflect.FieldNumber = 21
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -642,6 +657,27 @@ const (
FieldOptions_OptionTargetType_enum_name = "OptionTargetType"
)
+// Names for google.protobuf.FieldOptions.EditionDefault.
+const (
+ FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault"
+ FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault"
+)
+
+// Field names for google.protobuf.FieldOptions.EditionDefault.
+const (
+ FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition"
+ FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value"
+
+ FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition"
+ FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value"
+)
+
+// Field numbers for google.protobuf.FieldOptions.EditionDefault.
+const (
+ FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3
+ FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2
+)
+
// Names for google.protobuf.OneofOptions.
const (
OneofOptions_message_name protoreflect.Name = "OneofOptions"
@@ -650,13 +686,16 @@ const (
// Field names for google.protobuf.OneofOptions.
const (
+ OneofOptions_Features_field_name protoreflect.Name = "features"
OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+ OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features"
OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.OneofOptions.
const (
+ OneofOptions_Features_field_number protoreflect.FieldNumber = 1
OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -671,11 +710,13 @@ const (
EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias"
EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
+ EnumOptions_Features_field_name protoreflect.Name = "features"
EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts"
+ EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features"
EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
)
@@ -684,6 +725,7 @@ const (
EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2
EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6
+ EnumOptions_Features_field_number protoreflect.FieldNumber = 7
EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -696,15 +738,21 @@ const (
// Field names for google.protobuf.EnumValueOptions.
const (
EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ EnumValueOptions_Features_field_name protoreflect.Name = "features"
+ EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
+ EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
+ EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.EnumValueOptions.
const (
EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
+ EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2
+ EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3
EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -716,15 +764,18 @@ const (
// Field names for google.protobuf.ServiceOptions.
const (
+ ServiceOptions_Features_field_name protoreflect.Name = "features"
ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated"
ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+ ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features"
ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated"
ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.ServiceOptions.
const (
+ ServiceOptions_Features_field_number protoreflect.FieldNumber = 34
ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33
ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -739,10 +790,12 @@ const (
const (
MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated"
MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level"
+ MethodOptions_Features_field_name protoreflect.Name = "features"
MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated"
MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level"
+ MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features"
MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option"
)
@@ -750,6 +803,7 @@ const (
const (
MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33
MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34
+ MethodOptions_Features_field_number protoreflect.FieldNumber = 35
MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -816,6 +870,120 @@ const (
UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2
)
+// Names for google.protobuf.FeatureSet.
+const (
+ FeatureSet_message_name protoreflect.Name = "FeatureSet"
+ FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet"
+)
+
+// Field names for google.protobuf.FeatureSet.
+const (
+ FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence"
+ FeatureSet_EnumType_field_name protoreflect.Name = "enum_type"
+ FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
+ FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
+ FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
+ FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
+
+ FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+ FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+ FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+ FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+ FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+ FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+)
+
+// Field numbers for google.protobuf.FeatureSet.
+const (
+ FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1
+ FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2
+ FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
+ FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
+ FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
+ FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
+)
+
+// Full and short names for google.protobuf.FeatureSet.FieldPresence.
+const (
+ FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence"
+ FeatureSet_FieldPresence_enum_name = "FieldPresence"
+)
+
+// Full and short names for google.protobuf.FeatureSet.EnumType.
+const (
+ FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType"
+ FeatureSet_EnumType_enum_name = "EnumType"
+)
+
+// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding.
+const (
+ FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding"
+ FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding"
+)
+
+// Full and short names for google.protobuf.FeatureSet.Utf8Validation.
+const (
+ FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation"
+ FeatureSet_Utf8Validation_enum_name = "Utf8Validation"
+)
+
+// Full and short names for google.protobuf.FeatureSet.MessageEncoding.
+const (
+ FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding"
+ FeatureSet_MessageEncoding_enum_name = "MessageEncoding"
+)
+
+// Full and short names for google.protobuf.FeatureSet.JsonFormat.
+const (
+ FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat"
+ FeatureSet_JsonFormat_enum_name = "JsonFormat"
+)
+
+// Names for google.protobuf.FeatureSetDefaults.
+const (
+ FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults"
+ FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults"
+)
+
+// Field names for google.protobuf.FeatureSetDefaults.
+const (
+ FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults"
+ FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition"
+ FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition"
+
+ FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults"
+ FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition"
+ FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition"
+)
+
+// Field numbers for google.protobuf.FeatureSetDefaults.
+const (
+ FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1
+ FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4
+ FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5
+)
+
+// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
+const (
+ FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault"
+ FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault"
+)
+
+// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
+const (
+ FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
+ FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
+
+ FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
+ FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
+)
+
+// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
+const (
+ FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
+ FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
+)
+
// Names for google.protobuf.SourceCodeInfo.
const (
SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo"
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
index 1a509b63ebc..f55dc01e3a9 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
@@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions
func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.BoolSlice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := 0
+ for _, v := range b {
+ if v < 0x80 {
+ count++
+ }
+ }
+ if count > 0 {
+ p.growBoolSlice(count)
+ }
+ s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption
func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := 0
+ for _, v := range b {
+ if v < 0x80 {
+ count++
+ }
+ }
+ if count > 0 {
+ p.growInt32Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := 0
+ for _, v := range b {
+ if v < 0x80 {
+ count++
+ }
+ }
+ if count > 0 {
+ p.growInt32Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint32Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := 0
+ for _, v := range b {
+ if v < 0x80 {
+ count++
+ }
+ }
+ if count > 0 {
+ p.growUint32Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption
func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := 0
+ for _, v := range b {
+ if v < 0x80 {
+ count++
+ }
+ }
+ if count > 0 {
+ p.growInt64Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := 0
+ for _, v := range b {
+ if v < 0x80 {
+ count++
+ }
+ }
+ if count > 0 {
+ p.growInt64Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint64Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := 0
+ for _, v := range b {
+ if v < 0x80 {
+ count++
+ }
+ }
+ if count > 0 {
+ p.growUint64Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt
func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := len(b) / protowire.SizeFixed32()
+ if count > 0 {
+ p.growInt32Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
@@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti
func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint32Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := len(b) / protowire.SizeFixed32()
+ if count > 0 {
+ p.growUint32Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
@@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption
func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Float32Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := len(b) / protowire.SizeFixed32()
+ if count > 0 {
+ p.growFloat32Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
@@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt
func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := len(b) / protowire.SizeFixed64()
+ if count > 0 {
+ p.growInt64Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
@@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti
func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint64Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := len(b) / protowire.SizeFixed64()
+ if count > 0 {
+ p.growUint64Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
@@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Float64Slice()
if wtyp == protowire.BytesType {
- s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
+ count := len(b) / protowire.SizeFixed64()
+ if count > 0 {
+ p.growFloat64Slice(count)
+ }
+ s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
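Note: each consume*Slice change above pre-sizes the destination slice before decoding a packed field. For varint-encoded elements it counts terminator bytes (any byte with the high bit clear ends exactly one varint); for fixed-width elements it divides the payload length by the element size. A stdlib-only sketch of the varint count, where the sample bytes encode 150, 5, and 300:

package main

import "fmt"

// countVarints reports how many varint values are packed in b: every byte
// with the high bit clear terminates exactly one varint.
func countVarints(b []byte) int {
	n := 0
	for _, c := range b {
		if c < 0x80 {
			n++
		}
	}
	return n
}

func main() {
	packed := []byte{0x96, 0x01, 0x05, 0xAC, 0x02} // 150, 5, 300
	fmt.Println(countVarints(packed))              // 3
}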
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 61c483fac06..2ab2c629784 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -206,13 +206,18 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
// Obtain a list of oneof wrapper types.
var oneofWrappers []reflect.Type
- for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
- if fn, ok := t.MethodByName(method); ok {
- for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
- if vs, ok := v.Interface().([]interface{}); ok {
- for _, v := range vs {
- oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
- }
+ methods := make([]reflect.Method, 0, 2)
+ if m, ok := t.MethodByName("XXX_OneofFuncs"); ok {
+ methods = append(methods, m)
+ }
+ if m, ok := t.MethodByName("XXX_OneofWrappers"); ok {
+ methods = append(methods, m)
+ }
+ for _, fn := range methods {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ for _, v := range vs {
+ oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
}
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 4f5fb67a0dd..629bacdcedd 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -192,12 +192,17 @@ fieldLoop:
// Derive a mapping of oneof wrappers to fields.
oneofWrappers := mi.OneofWrappers
- for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
- if fn, ok := reflect.PtrTo(t).MethodByName(method); ok {
- for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
- if vs, ok := v.Interface().([]interface{}); ok {
- oneofWrappers = vs
- }
+ methods := make([]reflect.Method, 0, 2)
+ if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+ methods = append(methods, m)
+ }
+ if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+ methods = append(methods, m)
+ }
+ for _, fn := range methods {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ oneofWrappers = vs
}
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
index 4c491bdf482..517e94434c7 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) {
p.v.Elem().Set(v.v)
}
+func growSlice(p pointer, addCap int) {
+ // TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
+ in := p.v.Elem()
+ out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
+ reflect.Copy(out, in)
+ p.v.Elem().Set(out)
+}
+
+func (p pointer) growBoolSlice(addCap int) {
+ growSlice(p, addCap)
+}
+
+func (p pointer) growInt32Slice(addCap int) {
+ growSlice(p, addCap)
+}
+
+func (p pointer) growUint32Slice(addCap int) {
+ growSlice(p, addCap)
+}
+
+func (p pointer) growInt64Slice(addCap int) {
+ growSlice(p, addCap)
+}
+
+func (p pointer) growUint64Slice(addCap int) {
+ growSlice(p, addCap)
+}
+
+func (p pointer) growFloat64Slice(addCap int) {
+ growSlice(p, addCap)
+}
+
+func (p pointer) growFloat32Slice(addCap int) {
+ growSlice(p, addCap)
+}
+
func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
func (ms *messageState) pointer() pointer { panic("not supported") }
func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index ee0e0573e39..4b020e31164 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) {
*(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p)
}
+func (p pointer) growBoolSlice(addCap int) {
+ sp := p.BoolSlice()
+ s := make([]bool, 0, addCap+len(*sp))
+ s = s[:len(*sp)]
+ copy(s, *sp)
+ *sp = s
+}
+
+func (p pointer) growInt32Slice(addCap int) {
+ sp := p.Int32Slice()
+ s := make([]int32, 0, addCap+len(*sp))
+ s = s[:len(*sp)]
+ copy(s, *sp)
+ *sp = s
+}
+
+func (p pointer) growUint32Slice(addCap int) {
+ p.growInt32Slice(addCap)
+}
+
+func (p pointer) growFloat32Slice(addCap int) {
+ p.growInt32Slice(addCap)
+}
+
+func (p pointer) growInt64Slice(addCap int) {
+ sp := p.Int64Slice()
+ s := make([]int64, 0, addCap+len(*sp))
+ s = s[:len(*sp)]
+ copy(s, *sp)
+ *sp = s
+}
+
+func (p pointer) growUint64Slice(addCap int) {
+ p.growInt64Slice(addCap)
+}
+
+func (p pointer) growFloat64Slice(addCap int) {
+ p.growInt64Slice(addCap)
+}
+
// Static check that MessageState does not exceed the size of a pointer.
const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{}))
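Note: both pointer implementations gain grow*Slice helpers so the decoder can reserve capacity up front; this unsafe variant allocates a larger backing array while keeping the current length, and the purego variant earlier in this diff does the same through reflect.MakeSlice. A minimal sketch of the length-preserving grow, with an early return added for illustration:

package main

import "fmt"

// growInt32 ensures s has capacity for at least addCap more elements,
// preserving the existing contents and length.
func growInt32(s []int32, addCap int) []int32 {
	if cap(s)-len(s) >= addCap {
		return s
	}
	out := make([]int32, len(s), len(s)+addCap)
	copy(out, s)
	return out
}

func main() {
	s := []int32{1, 2, 3}
	s = growInt32(s, 100)
	fmt.Println(len(s), cap(s)) // 3 103
}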
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
similarity index 96%
rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
index 61a84d34185..a008acd0908 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
+//go:build !purego && !appengine && !go1.21
+// +build !purego,!appengine,!go1.21
package strs
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
new file mode 100644
index 00000000000..60166f2ba3c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
@@ -0,0 +1,74 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !purego && !appengine && go1.21
+// +build !purego,!appengine,go1.21
+
+package strs
+
+import (
+ "unsafe"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// UnsafeString returns an unsafe string reference of b.
+// The caller must treat the input slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user
+// unless the input slice is provably immutable.
+func UnsafeString(b []byte) string {
+ return unsafe.String(unsafe.SliceData(b), len(b))
+}
+
+// UnsafeBytes returns an unsafe bytes slice reference of s.
+// The caller must treat returned slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user.
+func UnsafeBytes(s string) []byte {
+ return unsafe.Slice(unsafe.StringData(s), len(s))
+}
+
+// Builder builds a set of strings with shared lifetime.
+// This differs from strings.Builder, which is for building a single string.
+type Builder struct {
+ buf []byte
+}
+
+// AppendFullName is equivalent to protoreflect.FullName.Append,
+// but optimized for large batches where each name has a shared lifetime.
+func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
+ n := len(prefix) + len(".") + len(name)
+ if len(prefix) == 0 {
+ n -= len(".")
+ }
+ sb.grow(n)
+ sb.buf = append(sb.buf, prefix...)
+ sb.buf = append(sb.buf, '.')
+ sb.buf = append(sb.buf, name...)
+ return protoreflect.FullName(sb.last(n))
+}
+
+// MakeString is equivalent to string(b), but optimized for large batches
+// with a shared lifetime.
+func (sb *Builder) MakeString(b []byte) string {
+ sb.grow(len(b))
+ sb.buf = append(sb.buf, b...)
+ return sb.last(len(b))
+}
+
+func (sb *Builder) grow(n int) {
+ if cap(sb.buf)-len(sb.buf) >= n {
+ return
+ }
+
+ // Unlike strings.Builder, we do not need to copy over the contents
+ // of the old buffer since our builder provides no API for
+ // retrieving previously created strings.
+ sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
+}
+
+func (sb *Builder) last(n int) string {
+ return UnsafeString(sb.buf[len(sb.buf)-n:])
+}
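
UnsafeString and UnsafeBytes in the new file above are zero-copy conversions built on the unsafe.String/unsafe.Slice API (available since Go 1.20); the price is that the shared backing memory must never be mutated afterwards. A small sketch of those calls in isolation:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	b := []byte("hello")
	// No copy: s shares b's backing array, so b must stay immutable from here on.
	s := unsafe.String(unsafe.SliceData(b), len(b))
	fmt.Println(s) // hello

	// The reverse direction; the returned slice must be treated as read-only.
	back := unsafe.Slice(unsafe.StringData(s), len(s))
	fmt.Println(len(back)) // 5
}
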
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 0999f29d501..d8f48faffac 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,7 +51,7 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 31
+ Minor = 32
Patch = 0
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 48d47946bb1..e5b03b56771 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -69,7 +69,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
// UnmarshalState parses a wire-format message and places the result in m.
//
// This method permits fine-grained control over the unmarshaler.
-// Most users should use Unmarshal instead.
+// Most users should use [Unmarshal] instead.
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
if o.RecursionLimit == 0 {
o.RecursionLimit = protowire.DefaultRecursionLimit
diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go
index ec71e717fe7..80ed16a0c29 100644
--- a/vendor/google.golang.org/protobuf/proto/doc.go
+++ b/vendor/google.golang.org/protobuf/proto/doc.go
@@ -18,27 +18,27 @@
// This package contains functions to convert to and from the wire format,
// an efficient binary serialization of protocol buffers.
//
-// • Size reports the size of a message in the wire format.
+// - [Size] reports the size of a message in the wire format.
//
-// • Marshal converts a message to the wire format.
-// The MarshalOptions type provides more control over wire marshaling.
+// - [Marshal] converts a message to the wire format.
+// The [MarshalOptions] type provides more control over wire marshaling.
//
-// • Unmarshal converts a message from the wire format.
-// The UnmarshalOptions type provides more control over wire unmarshaling.
+// - [Unmarshal] converts a message from the wire format.
+// The [UnmarshalOptions] type provides more control over wire unmarshaling.
//
// # Basic message operations
//
-// • Clone makes a deep copy of a message.
+// - [Clone] makes a deep copy of a message.
//
-// • Merge merges the content of a message into another.
+// - [Merge] merges the content of a message into another.
//
-// • Equal compares two messages. For more control over comparisons
-// and detailed reporting of differences, see package
-// "google.golang.org/protobuf/testing/protocmp".
+// - [Equal] compares two messages. For more control over comparisons
+// and detailed reporting of differences, see package
+// [google.golang.org/protobuf/testing/protocmp].
//
-// • Reset clears the content of a message.
+// - [Reset] clears the content of a message.
//
-// • CheckInitialized reports whether all required fields in a message are set.
+// - [CheckInitialized] reports whether all required fields in a message are set.
//
// # Optional scalar constructors
//
@@ -46,9 +46,9 @@
// as pointers to a value. For example, an optional string field has the
// Go type *string.
//
-// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String
-// take a value and return a pointer to a new instance of it,
-// to simplify construction of optional field values.
+// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String]
+// take a value and return a pointer to a new instance of it,
+// to simplify construction of optional field values.
//
// Generated enum types usually have an Enum method which performs the
// same operation.
@@ -57,29 +57,29 @@
//
// # Extension accessors
//
-// • HasExtension, GetExtension, SetExtension, and ClearExtension
-// access extension field values in a protocol buffer message.
+// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension]
+// access extension field values in a protocol buffer message.
//
// Extension fields are only supported in proto2.
//
// # Related packages
//
-// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to
-// and from JSON.
+// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to
+// and from JSON.
//
-// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to
-// and from the text format.
+// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to
+// and from the text format.
//
-// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a
-// reflection interface for protocol buffer data types.
+// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a
+// reflection interface for protocol buffer data types.
//
-// • Package "google.golang.org/protobuf/testing/protocmp" provides features
-// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp"
-// package.
+// - Package [google.golang.org/protobuf/testing/protocmp] provides features
+// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp]
+// package.
//
-// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic
-// message type, suitable for working with messages where the protocol buffer
-// type is only known at runtime.
+// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic
+// message type, suitable for working with messages where the protocol buffer
+// type is only known at runtime.
//
// This module contains additional packages for more specialized use cases.
// Consult the individual package documentation for details.
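
For a concrete view of the Size/Marshal/Unmarshal functions listed in the package documentation above, a minimal wire-format round trip using the well-known StringValue wrapper (any generated message type would work the same way):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	in := wrapperspb.String("hello")
	fmt.Println(proto.Size(in)) // size of the wire encoding in bytes

	b, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}

	out := &wrapperspb.StringValue{}
	if err := proto.Unmarshal(b, out); err != nil {
		panic(err)
	}
	fmt.Println(proto.Equal(in, out)) // true
}
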
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index bf7f816d0e8..4fed202f9fc 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -129,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
// MarshalState returns the wire-format encoding of a message.
//
// This method permits fine-grained control over the marshaler.
-// Most users should use Marshal instead.
+// Most users should use [Marshal] instead.
func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
return o.marshal(in.Buf, in.Message)
}
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index 5f293cda869..17899a3a767 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -26,7 +26,7 @@ func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
}
// ClearExtension clears an extension field such that subsequent
-// HasExtension calls return false.
+// [HasExtension] calls return false.
// It panics if m is invalid or if xt does not extend m.
func ClearExtension(m Message, xt protoreflect.ExtensionType) {
m.ProtoReflect().Clear(xt.TypeDescriptor())
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
index d761ab331d1..3c6fe57807b 100644
--- a/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -21,7 +21,7 @@ import (
// The unknown fields of src are appended to the unknown fields of dst.
//
// It is semantically equivalent to unmarshaling the encoded form of src
-// into dst with the UnmarshalOptions.Merge option specified.
+// into dst with the [UnmarshalOptions.Merge] option specified.
func Merge(dst, src Message) {
// TODO: Should nil src be treated as semantically equivalent to a
// untyped, read-only, empty message? What about a nil dst?
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go
index 1f0d183b102..7543ee6b255 100644
--- a/vendor/google.golang.org/protobuf/proto/proto.go
+++ b/vendor/google.golang.org/protobuf/proto/proto.go
@@ -15,18 +15,20 @@ import (
// protobuf module that accept a Message, except where otherwise specified.
//
// This is the v2 interface definition for protobuf messages.
-// The v1 interface definition is "github.com/golang/protobuf/proto".Message.
+// The v1 interface definition is [github.com/golang/protobuf/proto.Message].
//
-// To convert a v1 message to a v2 message,
-// use "github.com/golang/protobuf/proto".MessageV2.
-// To convert a v2 message to a v1 message,
-// use "github.com/golang/protobuf/proto".MessageV1.
+// - To convert a v1 message to a v2 message,
+// use [google.golang.org/protobuf/protoadapt.MessageV2Of].
+// - To convert a v2 message to a v1 message,
+// use [google.golang.org/protobuf/protoadapt.MessageV1Of].
type Message = protoreflect.ProtoMessage
-// Error matches all errors produced by packages in the protobuf module.
+// Error matches all errors produced by packages in the protobuf module
+// according to [errors.Is].
//
-// That is, errors.Is(err, Error) reports whether an error is produced
-// by this module.
+// Example usage:
+//
+// if errors.Is(err, proto.Error) { ... }
var Error error
func init() {
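
A runnable version of the errors.Is usage sketched in the comment above; the input bytes are deliberately invalid wire data so that Unmarshal returns an error originating from this module:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	var m wrapperspb.StringValue
	err := proto.Unmarshal([]byte{0xff}, &m) // truncated varint tag
	fmt.Println(errors.Is(err, proto.Error)) // true
}
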
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index e4dfb120506..baa0cc6218f 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -3,11 +3,11 @@
// license that can be found in the LICENSE file.
// Package protodesc provides functionality for converting
-// FileDescriptorProto messages to/from protoreflect.FileDescriptor values.
+// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values.
//
// The google.protobuf.FileDescriptorProto is a protobuf message that describes
// the type information for a .proto file in a form that is easily serializable.
-// The protoreflect.FileDescriptor is a more structured representation of
+// The [protoreflect.FileDescriptor] is a more structured representation of
// the FileDescriptorProto message where references and remote dependencies
// can be directly followed.
package protodesc
@@ -24,11 +24,11 @@ import (
"google.golang.org/protobuf/types/descriptorpb"
)
-// Resolver is the resolver used by NewFile to resolve dependencies.
+// Resolver is the resolver used by [NewFile] to resolve dependencies.
// The enums and messages provided must belong to some parent file,
// which is also registered.
//
-// It is implemented by protoregistry.Files.
+// It is implemented by [protoregistry.Files].
type Resolver interface {
FindFileByPath(string) (protoreflect.FileDescriptor, error)
FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
@@ -61,19 +61,19 @@ type FileOptions struct {
AllowUnresolvable bool
}
-// NewFile creates a new protoreflect.FileDescriptor from the provided
-// file descriptor message. See FileOptions.New for more information.
+// NewFile creates a new [protoreflect.FileDescriptor] from the provided
+// file descriptor message. See [FileOptions.New] for more information.
func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) {
return FileOptions{}.New(fd, r)
}
-// NewFiles creates a new protoregistry.Files from the provided
-// FileDescriptorSet message. See FileOptions.NewFiles for more information.
+// NewFiles creates a new [protoregistry.Files] from the provided
+// FileDescriptorSet message. See [FileOptions.NewFiles] for more information.
func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) {
return FileOptions{}.NewFiles(fd)
}
-// New creates a new protoreflect.FileDescriptor from the provided
+// New creates a new [protoreflect.FileDescriptor] from the provided
// file descriptor message. The file must represent a valid proto file according
// to protobuf semantics. The returned descriptor is a deep copy of the input.
//
@@ -93,9 +93,15 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
f.L1.Syntax = protoreflect.Proto2
case "proto3":
f.L1.Syntax = protoreflect.Proto3
+ case "editions":
+ f.L1.Syntax = protoreflect.Editions
+ f.L1.Edition = fromEditionProto(fd.GetEdition())
default:
return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
}
+ if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) {
+ return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
+ }
f.L1.Path = fd.GetName()
if f.L1.Path == "" {
return nil, errors.New("file path must be populated")
@@ -108,6 +114,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
opts = proto.Clone(opts).(*descriptorpb.FileOptions)
f.L2.Options = func() protoreflect.ProtoMessage { return opts }
}
+ if f.L1.Syntax == protoreflect.Editions {
+ initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
+ }
f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency()))
for _, i := range fd.GetPublicDependency() {
@@ -231,7 +240,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) {
}
}
-// NewFiles creates a new protoregistry.Files from the provided
+// NewFiles creates a new [protoregistry.Files] from the provided
// FileDescriptorSet message. The descriptor set must include only
// valid files according to protobuf semantics. The returned descriptors
// are a deep copy of the input.
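
NewFile, as documented above, turns a FileDescriptorProto into a resolved protoreflect.FileDescriptor. A minimal sketch with a dependency-free proto3 file; the file name and package here are made up for illustration:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fdp := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		Syntax:  proto.String("proto3"),
	}
	fd, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Path(), fd.Syntax()) // example.proto proto3
}
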
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 37efda1afe9..aff6fd4900e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -137,6 +137,30 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
if fd.JsonName != nil {
f.L1.StringName.InitJSON(fd.GetJsonName())
}
+
+ if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions {
+ f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd)
+ // We reuse the existing field because the old option `[packed =
+ // true]` is mutually exclusive with the editions feature.
+ if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED {
+ f.L1.HasPacked = true
+ f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd)
+ }
+
+ // We pretend this option is always explicitly set because the only
+ // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8
+ // or to return the appropriate default.
+ // When using editions we either parse the option or resolve the
+ // appropriate default here (instead of later when this option is
+ // requested from the descriptor).
+ // In proto2/proto3 syntax HasEnforceUTF8 might be false.
+ f.L1.HasEnforceUTF8 = true
+ f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd)
+
+ if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) {
+ f.L1.Kind = protoreflect.GroupKind
+ }
+ }
}
return fs, nil
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
new file mode 100644
index 00000000000..7352926cab4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -0,0 +1,177 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+ _ "embed"
+ "fmt"
+ "os"
+ "sync"
+
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+ SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2
+ SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023
+)
+
+//go:embed editions_defaults.binpb
+var binaryEditionDefaults []byte
+var defaults = &descriptorpb.FeatureSetDefaults{}
+var defaultsCacheMu sync.Mutex
+var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet)
+
+func init() {
+ err := proto.Unmarshal(binaryEditionDefaults, defaults)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition {
+ return filedesc.Edition(epb)
+}
+
+func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
+ switch ed {
+ case filedesc.EditionUnknown:
+ return descriptorpb.Edition_EDITION_UNKNOWN
+ case filedesc.EditionProto2:
+ return descriptorpb.Edition_EDITION_PROTO2
+ case filedesc.EditionProto3:
+ return descriptorpb.Edition_EDITION_PROTO3
+ case filedesc.Edition2023:
+ return descriptorpb.Edition_EDITION_2023
+ default:
+ panic(fmt.Sprintf("unknown value for edition: %v", ed))
+ }
+}
+
+func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
+ defaultsCacheMu.Lock()
+ defer defaultsCacheMu.Unlock()
+ if def, ok := defaultsCache[ed]; ok {
+ return def
+ }
+ edpb := toEditionProto(ed)
+ if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb {
+ // This should never happen, since protodesc.(FileOptions).New would fail when
+ // initializing the file descriptor.
+ // This most likely means the embedded defaults were not updated.
+ fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb)
+ os.Exit(1)
+ }
+ fs := defaults.GetDefaults()[0].GetFeatures()
+ // Using a linear search for now.
+ // Editions are guaranteed to be sorted and thus we could use a binary search.
+ // Given that there are only a handful of editions (with one more per year)
+ // there is not much reason to use a binary search.
+ for _, def := range defaults.GetDefaults() {
+ if def.GetEdition() <= edpb {
+ fs = def.GetFeatures()
+ } else {
+ break
+ }
+ }
+ defaultsCache[ed] = fs
+ return fs
+}
+
+func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
+ fs := fieldDesc.GetOptions().GetFeatures()
+ if fs == nil || fs.FieldPresence == nil {
+ return fileDesc.L1.EditionFeatures.IsFieldPresence
+ }
+ return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED ||
+ fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT
+}
+
+func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
+ fs := fieldDesc.GetOptions().GetFeatures()
+ if fs == nil || fs.RepeatedFieldEncoding == nil {
+ return fileDesc.L1.EditionFeatures.IsPacked
+ }
+ return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED
+}
+
+func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
+ fs := fieldDesc.GetOptions().GetFeatures()
+ if fs == nil || fs.Utf8Validation == nil {
+ return fileDesc.L1.EditionFeatures.IsUTF8Validated
+ }
+ return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY
+}
+
+func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
+ fs := fieldDesc.GetOptions().GetFeatures()
+ if fs == nil || fs.MessageEncoding == nil {
+ return fileDesc.L1.EditionFeatures.IsDelimitedEncoded
+ }
+ return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED
+}
+
+// initFileDescFromFeatureSet initializes editions related fields in fd based
+// on fs. If fs is nil it is assumed to be an empty featureset and all fields
+// will be initialized with the appropriate default. fd.L1.Edition must be set
+// before calling this function.
+func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) {
+ dfs := getFeatureSetFor(fd.L1.Edition)
+ if fs == nil {
+ fs = &descriptorpb.FeatureSet{}
+ }
+
+ var fieldPresence descriptorpb.FeatureSet_FieldPresence
+ if fp := fs.FieldPresence; fp != nil {
+ fieldPresence = *fp
+ } else {
+ fieldPresence = *dfs.FieldPresence
+ }
+ fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED ||
+ fieldPresence == descriptorpb.FeatureSet_EXPLICIT
+
+ var enumType descriptorpb.FeatureSet_EnumType
+ if et := fs.EnumType; et != nil {
+ enumType = *et
+ } else {
+ enumType = *dfs.EnumType
+ }
+ fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN
+
+ var repeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding
+ if rfe := fs.RepeatedFieldEncoding; rfe != nil {
+ repeatedFieldEncoding = *rfe
+ } else {
+ repeatedFieldEncoding = *dfs.RepeatedFieldEncoding
+ }
+ fd.L1.EditionFeatures.IsPacked = repeatedFieldEncoding == descriptorpb.FeatureSet_PACKED
+
+ var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation
+ if utf8val := fs.Utf8Validation; utf8val != nil {
+ isUTF8Validated = *utf8val
+ } else {
+ isUTF8Validated = *dfs.Utf8Validation
+ }
+ fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY
+
+ var messageEncoding descriptorpb.FeatureSet_MessageEncoding
+ if me := fs.MessageEncoding; me != nil {
+ messageEncoding = *me
+ } else {
+ messageEncoding = *dfs.MessageEncoding
+ }
+ fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED
+
+ var jsonFormat descriptorpb.FeatureSet_JsonFormat
+ if jf := fs.JsonFormat; jf != nil {
+ jsonFormat = *jf
+ } else {
+ jsonFormat = *dfs.JsonFormat
+ }
+ fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW
+}
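
getFeatureSetFor above picks the defaults entry with the largest edition that does not exceed the requested one, relying on the entries being sorted by edition. A generic sketch of that selection loop; the types and sample values are illustrative:

package main

import "fmt"

type editionDefault struct {
	edition int
	value   string
}

// pick returns the value of the newest default whose edition is <= want.
func pick(defaults []editionDefault, want int) string {
	v := defaults[0].value
	for _, d := range defaults { // assumed sorted by edition
		if d.edition <= want {
			v = d.value
		} else {
			break
		}
	}
	return v
}

func main() {
	ds := []editionDefault{{998, "proto2 defaults"}, {999, "proto3 defaults"}, {1000, "2023 defaults"}}
	fmt.Println(pick(ds, 999)) // proto3 defaults
}
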
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
new file mode 100644
index 00000000000..1a8610a8439
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
@@ -0,0 +1,4 @@
+(binary data: serialized google.protobuf.FeatureSetDefaults edition defaults; not representable as text)
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index a7c5ceffc9b..9d6e05420f7 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -16,7 +16,7 @@ import (
"google.golang.org/protobuf/types/descriptorpb"
)
-// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a
+// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a
// google.protobuf.FileDescriptorProto message.
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
p := &descriptorpb.FileDescriptorProto{
@@ -70,13 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD
for i, exts := 0, file.Extensions(); i < exts.Len(); i++ {
p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))
}
- if syntax := file.Syntax(); syntax != protoreflect.Proto2 {
+ if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
p.Syntax = proto.String(file.Syntax().String())
}
return p
}
-// ToDescriptorProto copies a protoreflect.MessageDescriptor into a
+// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a
// google.protobuf.DescriptorProto message.
func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
p := &descriptorpb.DescriptorProto{
@@ -119,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des
return p
}
-// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a
+// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a
// google.protobuf.FieldDescriptorProto message.
func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
p := &descriptorpb.FieldDescriptorProto{
@@ -168,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi
return p
}
-// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a
+// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a
// google.protobuf.OneofDescriptorProto message.
func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
return &descriptorpb.OneofDescriptorProto{
@@ -177,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On
}
}
-// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a
+// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a
// google.protobuf.EnumDescriptorProto message.
func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
p := &descriptorpb.EnumDescriptorProto{
@@ -200,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD
return p
}
-// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a
+// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a
// google.protobuf.EnumValueDescriptorProto message.
func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
return &descriptorpb.EnumValueDescriptorProto{
@@ -210,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip
}
}
-// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a
+// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a
// google.protobuf.ServiceDescriptorProto message.
func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
p := &descriptorpb.ServiceDescriptorProto{
@@ -223,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto
return p
}
-// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a
+// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a
// google.protobuf.MethodDescriptorProto message.
func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
p := &descriptorpb.MethodDescriptorProto{
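
The To*DescriptorProto helpers above convert in the opposite direction, from a resolved descriptor back to its proto form. A short sketch using the file descriptor of a well-known type:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	file := wrapperspb.String("x").ProtoReflect().Descriptor().ParentFile()
	fdp := protodesc.ToFileDescriptorProto(file)
	fmt.Println(fdp.GetName()) // google/protobuf/wrappers.proto
}
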
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
index 55aa14922b0..ec6572dfda9 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -10,46 +10,46 @@
//
// # Protocol Buffer Descriptors
//
-// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor)
+// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor])
// are immutable objects that represent protobuf type information.
// They are wrappers around the messages declared in descriptor.proto.
// Protobuf descriptors alone lack any information regarding Go types.
//
-// Enums and messages generated by this module implement Enum and ProtoMessage,
+// Enums and messages generated by this module implement [Enum] and [ProtoMessage],
// where the Descriptor and ProtoReflect.Descriptor accessors respectively
// return the protobuf descriptor for the values.
//
// The protobuf descriptor interfaces are not meant to be implemented by
// user code since they might need to be extended in the future to support
// additions to the protobuf language.
-// The "google.golang.org/protobuf/reflect/protodesc" package converts between
+// The [google.golang.org/protobuf/reflect/protodesc] package converts between
// google.protobuf.DescriptorProto messages and protobuf descriptors.
//
// # Go Type Descriptors
//
-// A type descriptor (e.g., EnumType or MessageType) is a constructor for
+// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for
// a concrete Go type that represents the associated protobuf descriptor.
// There is commonly a one-to-one relationship between protobuf descriptors and
// Go type descriptors, but it can potentially be a one-to-many relationship.
//
-// Enums and messages generated by this module implement Enum and ProtoMessage,
+// Enums and messages generated by this module implement [Enum] and [ProtoMessage],
// where the Type and ProtoReflect.Type accessors respectively
// return the protobuf descriptor for the values.
//
-// The "google.golang.org/protobuf/types/dynamicpb" package can be used to
+// The [google.golang.org/protobuf/types/dynamicpb] package can be used to
// create Go type descriptors from protobuf descriptors.
//
// # Value Interfaces
//
-// The Enum and Message interfaces provide a reflective view over an
+// The [Enum] and [Message] interfaces provide a reflective view over an
// enum or message instance. For enums, it provides the ability to retrieve
// the enum value number for any concrete enum type. For messages, it provides
// the ability to access or manipulate fields of the message.
//
-// To convert a proto.Message to a protoreflect.Message, use the
+// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the
// former's ProtoReflect method. Since the ProtoReflect method is new to the
// v2 message interface, it may not be present on older message implementations.
-// The "github.com/golang/protobuf/proto".MessageReflect function can be used
+// The [github.com/golang/protobuf/proto.MessageReflect] function can be used
// to obtain a reflective view on older messages.
//
// # Relationships
@@ -71,12 +71,12 @@
// │ │
// └────────────────── Type() ───────┘
//
-// • An EnumType describes a concrete Go enum type.
+// • An [EnumType] describes a concrete Go enum type.
// It has an EnumDescriptor and can construct an Enum instance.
//
-// • An EnumDescriptor describes an abstract protobuf enum type.
+// • An [EnumDescriptor] describes an abstract protobuf enum type.
//
-// • An Enum is a concrete enum instance. Generated enums implement Enum.
+// • An [Enum] is a concrete enum instance. Generated enums implement Enum.
//
// ┌──────────────── New() ─────────────────┐
// │ │
@@ -90,24 +90,26 @@
// │ │
// └─────────────────── Type() ─────────┘
//
-// • A MessageType describes a concrete Go message type.
-// It has a MessageDescriptor and can construct a Message instance.
-// Just as how Go's reflect.Type is a reflective description of a Go type,
-// a MessageType is a reflective description of a Go type for a protobuf message.
+// • A [MessageType] describes a concrete Go message type.
+// It has a [MessageDescriptor] and can construct a [Message] instance.
+// Just as how Go's [reflect.Type] is a reflective description of a Go type,
+// a [MessageType] is a reflective description of a Go type for a protobuf message.
//
-// • A MessageDescriptor describes an abstract protobuf message type.
-// It has no understanding of Go types. In order to construct a MessageType
-// from just a MessageDescriptor, you can consider looking up the message type
-// in the global registry using protoregistry.GlobalTypes.FindMessageByName
-// or constructing a dynamic MessageType using dynamicpb.NewMessageType.
+// • A [MessageDescriptor] describes an abstract protobuf message type.
+// It has no understanding of Go types. In order to construct a [MessageType]
+// from just a [MessageDescriptor], you can consider looking up the message type
+// in the global registry using the FindMessageByName method on
+// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes]
+// or constructing a dynamic [MessageType] using
+// [google.golang.org/protobuf/types/dynamicpb.NewMessageType].
//
-// • A Message is a reflective view over a concrete message instance.
-// Generated messages implement ProtoMessage, which can convert to a Message.
-// Just as how Go's reflect.Value is a reflective view over a Go value,
-// a Message is a reflective view over a concrete protobuf message instance.
-// Using Go reflection as an analogy, the ProtoReflect method is similar to
-// calling reflect.ValueOf, and the Message.Interface method is similar to
-// calling reflect.Value.Interface.
+// • A [Message] is a reflective view over a concrete message instance.
+// Generated messages implement [ProtoMessage], which can convert to a [Message].
+// Just as how Go's [reflect.Value] is a reflective view over a Go value,
+// a [Message] is a reflective view over a concrete protobuf message instance.
+// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to
+// calling [reflect.ValueOf], and the [Message.Interface] method is similar to
+// calling [reflect.Value.Interface].
//
// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐
// │ V │ V
@@ -119,15 +121,15 @@
// │ │
// └────── implements ────────┘
//
-// • An ExtensionType describes a concrete Go implementation of an extension.
-// It has an ExtensionTypeDescriptor and can convert to/from
-// abstract Values and Go values.
+// • An [ExtensionType] describes a concrete Go implementation of an extension.
+// It has an [ExtensionTypeDescriptor] and can convert to/from
+// an abstract [Value] and a Go value.
//
-// • An ExtensionTypeDescriptor is an ExtensionDescriptor
-// which also has an ExtensionType.
+// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor]
+// which also has an [ExtensionType].
//
-// • An ExtensionDescriptor describes an abstract protobuf extension field and
-// may not always be an ExtensionTypeDescriptor.
+// • An [ExtensionDescriptor] describes an abstract protobuf extension field and
+// may not always be an [ExtensionTypeDescriptor].
package protoreflect
import (
@@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement
// ProtoMessage is the top-level interface that all proto messages implement.
// This is declared in the protoreflect package to avoid a cyclic dependency;
-// use the proto.Message type instead, which aliases this type.
+// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type.
type ProtoMessage interface{ ProtoReflect() Message }
// Syntax is the language version of the proto file.
@@ -151,8 +153,9 @@ type Syntax syntax
type syntax int8 // keep exact type opaque as the int type may change
const (
- Proto2 Syntax = 2
- Proto3 Syntax = 3
+ Proto2 Syntax = 2
+ Proto3 Syntax = 3
+ Editions Syntax = 4
)
// IsValid reports whether the syntax is valid.
@@ -436,7 +439,7 @@ type Names interface {
// FullName is a qualified name that uniquely identifies a proto declaration.
// A qualified name is the concatenation of the proto package along with the
// fully-declared name (i.e., name of parent preceding the name of the child),
-// with a '.' delimiter placed between each Name.
+// with a '.' delimiter placed between each [Name].
//
// This should not have any leading or trailing dots.
type FullName string // e.g., "google.protobuf.Field.Kind"
@@ -480,7 +483,7 @@ func isLetterDigit(c byte) bool {
}
// Name returns the short name, which is the last identifier segment.
-// A single segment FullName is the Name itself.
+// A single segment FullName is the [Name] itself.
func (n FullName) Name() Name {
if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
return Name(n[i+1:])
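
A tiny illustration of the FullName/Name relationship described in the comments above:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	fn := protoreflect.FullName("google.protobuf.Field.Kind")
	fmt.Println(fn.Name())   // Kind
	fmt.Println(fn.Parent()) // google.protobuf.Field
}
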
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index 717b106f3da..0c045db6ab6 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo)
case 12:
b = p.appendSingularField(b, "syntax", nil)
- case 13:
+ case 14:
b = p.appendSingularField(b, "edition", nil)
}
return b
@@ -180,6 +180,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte {
b = p.appendSingularField(b, "php_metadata_namespace", nil)
case 45:
b = p.appendSingularField(b, "ruby_package", nil)
+ case 50:
+ b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -240,6 +242,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte {
b = p.appendSingularField(b, "map_entry", nil)
case 11:
b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
+ case 12:
+ b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -285,6 +289,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte {
b = p.appendSingularField(b, "deprecated", nil)
case 6:
b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
+ case 7:
+ b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -330,6 +336,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte {
return b
}
switch (*p)[0] {
+ case 34:
+ b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 33:
b = p.appendSingularField(b, "deprecated", nil)
case 999:
@@ -361,16 +369,39 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte {
b = p.appendSingularField(b, "debug_redact", nil)
case 17:
b = p.appendSingularField(b, "retention", nil)
- case 18:
- b = p.appendSingularField(b, "target", nil)
case 19:
b = p.appendRepeatedField(b, "targets", nil)
+ case 20:
+ b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault)
+ case 21:
+ b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
return b
}
+func (p *SourcePath) appendFeatureSet(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "field_presence", nil)
+ case 2:
+ b = p.appendSingularField(b, "enum_type", nil)
+ case 3:
+ b = p.appendSingularField(b, "repeated_field_encoding", nil)
+ case 4:
+ b = p.appendSingularField(b, "utf8_validation", nil)
+ case 5:
+ b = p.appendSingularField(b, "message_encoding", nil)
+ case 6:
+ b = p.appendSingularField(b, "json_format", nil)
+ }
+ return b
+}
+
func (p *SourcePath) appendUninterpretedOption(b []byte) []byte {
if len(*p) == 0 {
return b
@@ -422,6 +453,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte {
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
case 2:
b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration)
+ case 50:
+ b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 3:
b = p.appendSingularField(b, "verification", nil)
}
@@ -433,6 +466,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte {
return b
}
switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -446,6 +481,10 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte {
switch (*p)[0] {
case 1:
b = p.appendSingularField(b, "deprecated", nil)
+ case 2:
+ b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
+ case 3:
+ b = p.appendSingularField(b, "debug_redact", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -461,12 +500,27 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte {
b = p.appendSingularField(b, "deprecated", nil)
case 34:
b = p.appendSingularField(b, "idempotency_level", nil)
+ case 35:
+ b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
return b
}
+func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 3:
+ b = p.appendSingularField(b, "edition", nil)
+ case 2:
+ b = p.appendSingularField(b, "value", nil)
+ }
+ return b
+}
+
func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
if len(*p) == 0 {
return b
@@ -491,8 +545,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte {
b = p.appendSingularField(b, "full_name", nil)
case 3:
b = p.appendSingularField(b, "type", nil)
- case 4:
- b = p.appendSingularField(b, "is_repeated", nil)
case 5:
b = p.appendSingularField(b, "reserved", nil)
case 6:
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 3867470d30a..60ff62b4c85 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -12,7 +12,7 @@ package protoreflect
// exactly identical. However, it is possible for the same semantically
// identical proto type to be represented by multiple type descriptors.
//
-// For example, suppose we have t1 and t2 which are both MessageDescriptors.
+// For example, suppose we have t1 and t2 which are both a [MessageDescriptor].
// If t1 == t2, then the types are definitely equal and all accessors return
// the same information. However, if t1 != t2, then it is still possible that
// they still represent the same proto type (e.g., t1.FullName == t2.FullName).
@@ -115,7 +115,7 @@ type Descriptor interface {
// corresponds with the google.protobuf.FileDescriptorProto message.
//
// Top-level declarations:
-// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor.
+// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor].
type FileDescriptor interface {
Descriptor // Descriptor.FullName is identical to Package
@@ -180,8 +180,8 @@ type FileImport struct {
// corresponds with the google.protobuf.DescriptorProto message.
//
// Nested declarations:
-// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor,
-// and/or MessageDescriptor.
+// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor],
+// and/or [MessageDescriptor].
type MessageDescriptor interface {
Descriptor
@@ -214,7 +214,7 @@ type MessageDescriptor interface {
ExtensionRanges() FieldRanges
// ExtensionRangeOptions returns the ith extension range options.
//
- // To avoid a dependency cycle, this method returns a proto.Message value,
+ // To avoid a dependency cycle, this method returns a [proto.Message] value,
// which always contains a google.protobuf.ExtensionRangeOptions message.
// This method returns a typed nil-pointer if no options are present.
// The caller must import the descriptorpb package to use this.
@@ -231,9 +231,9 @@ type MessageDescriptor interface {
}
type isMessageDescriptor interface{ ProtoType(MessageDescriptor) }
-// MessageType encapsulates a MessageDescriptor with a concrete Go implementation.
+// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation.
// It is recommended that implementations of this interface also implement the
-// MessageFieldTypes interface.
+// [MessageFieldTypes] interface.
type MessageType interface {
// New returns a newly allocated empty message.
// It may return nil for synthetic messages representing a map entry.
@@ -249,19 +249,19 @@ type MessageType interface {
Descriptor() MessageDescriptor
}
-// MessageFieldTypes extends a MessageType by providing type information
+// MessageFieldTypes extends a [MessageType] by providing type information
// regarding enums and messages referenced by the message fields.
type MessageFieldTypes interface {
MessageType
- // Enum returns the EnumType for the ith field in Descriptor.Fields.
+ // Enum returns the EnumType for the ith field in MessageDescriptor.Fields.
// It returns nil if the ith field is not an enum kind.
// It panics if out of bounds.
//
// Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum()
Enum(i int) EnumType
- // Message returns the MessageType for the ith field in Descriptor.Fields.
+ // Message returns the MessageType for the ith field in MessageDescriptor.Fields.
// It returns nil if the ith field is not a message or group kind.
// It panics if out of bounds.
//
@@ -286,8 +286,8 @@ type MessageDescriptors interface {
// corresponds with the google.protobuf.FieldDescriptorProto message.
//
// It is used for both normal fields defined within the parent message
-// (e.g., MessageDescriptor.Fields) and fields that extend some remote message
-// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions).
+// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message
+// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]).
type FieldDescriptor interface {
Descriptor
@@ -344,7 +344,7 @@ type FieldDescriptor interface {
// IsMap reports whether this field represents a map,
// where the value type for the associated field is a Map.
// It is equivalent to checking whether Cardinality is Repeated,
- // that the Kind is MessageKind, and that Message.IsMapEntry reports true.
+ // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true.
IsMap() bool
// MapKey returns the field descriptor for the key in the map entry.
@@ -419,7 +419,7 @@ type OneofDescriptor interface {
// IsSynthetic reports whether this is a synthetic oneof created to support
// proto3 optional semantics. If true, Fields contains exactly one field
- // with HasOptionalKeyword specified.
+ // with FieldDescriptor.HasOptionalKeyword specified.
IsSynthetic() bool
// Fields is a list of fields belonging to this oneof.
@@ -442,10 +442,10 @@ type OneofDescriptors interface {
doNotImplement
}
-// ExtensionDescriptor is an alias of FieldDescriptor for documentation.
+// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation.
type ExtensionDescriptor = FieldDescriptor
-// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType.
+// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType].
type ExtensionTypeDescriptor interface {
ExtensionDescriptor
@@ -470,12 +470,12 @@ type ExtensionDescriptors interface {
doNotImplement
}
-// ExtensionType encapsulates an ExtensionDescriptor with a concrete
+// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete
// Go implementation. The nested field descriptor must be for a extension field.
//
// While a normal field is a member of the parent message that it is declared
-// within (see Descriptor.Parent), an extension field is a member of some other
-// target message (see ExtensionDescriptor.Extendee) and may have no
+// within (see [Descriptor.Parent]), an extension field is a member of some other
+// target message (see [FieldDescriptor.ContainingMessage]) and may have no
// relationship with the parent. However, the full name of an extension field is
// relative to the parent that it is declared within.
//
@@ -532,7 +532,7 @@ type ExtensionType interface {
// corresponds with the google.protobuf.EnumDescriptorProto message.
//
// Nested declarations:
-// EnumValueDescriptor.
+// [EnumValueDescriptor].
type EnumDescriptor interface {
Descriptor
@@ -548,7 +548,7 @@ type EnumDescriptor interface {
}
type isEnumDescriptor interface{ ProtoType(EnumDescriptor) }
-// EnumType encapsulates an EnumDescriptor with a concrete Go implementation.
+// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation.
type EnumType interface {
// New returns an instance of this enum type with its value set to n.
New(n EnumNumber) Enum
@@ -610,7 +610,7 @@ type EnumValueDescriptors interface {
// ServiceDescriptor describes a service and
// corresponds with the google.protobuf.ServiceDescriptorProto message.
//
-// Nested declarations: MethodDescriptor.
+// Nested declarations: [MethodDescriptor].
type ServiceDescriptor interface {
Descriptor
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index 37601b78199..a7b0d06ff32 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -27,16 +27,16 @@ type Enum interface {
// Message is a reflective interface for a concrete message value,
// encapsulating both type and value information for the message.
//
-// Accessor/mutators for individual fields are keyed by FieldDescriptor.
+// Accessor/mutators for individual fields are keyed by [FieldDescriptor].
// For non-extension fields, the descriptor must exactly match the
// field known by the parent message.
-// For extension fields, the descriptor must implement ExtensionTypeDescriptor,
-// extend the parent message (i.e., have the same message FullName), and
+// For extension fields, the descriptor must implement [ExtensionTypeDescriptor],
+// extend the parent message (i.e., have the same message [FullName]), and
// be within the parent's extension range.
//
-// Each field Value can be a scalar or a composite type (Message, List, or Map).
-// See Value for the Go types associated with a FieldDescriptor.
-// Providing a Value that is invalid or of an incorrect type panics.
+// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]).
+// See [Value] for the Go types associated with a [FieldDescriptor].
+// Providing a [Value] that is invalid or of an incorrect type panics.
type Message interface {
// Descriptor returns message descriptor, which contains only the protobuf
// type information for the message.
@@ -152,7 +152,7 @@ type Message interface {
// This method may return nil.
//
// The returned methods type is identical to
- // "google.golang.org/protobuf/runtime/protoiface".Methods.
+ // google.golang.org/protobuf/runtime/protoiface.Methods.
// Consult the protoiface package documentation for details.
ProtoMethods() *methods
}
@@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool {
}
// List is a zero-indexed, ordered list.
-// The element Value type is determined by FieldDescriptor.Kind.
-// Providing a Value that is invalid or of an incorrect type panics.
+// The element [Value] type is determined by [FieldDescriptor.Kind].
+// Providing a [Value] that is invalid or of an incorrect type panics.
type List interface {
// Len reports the number of entries in the List.
// Get, Set, and Truncate panic with out of bound indexes.
@@ -226,9 +226,9 @@ type List interface {
}
// Map is an unordered, associative map.
-// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind.
-// The entry Value type is determined by FieldDescriptor.MapValue.Kind.
-// Providing a MapKey or Value that is invalid or of an incorrect type panics.
+// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind.
+// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind.
+// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics.
type Map interface {
// Len reports the number of elements in the map.
Len() int
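
As a hedged sketch of the descriptor-keyed access described in the Message documentation above, reading and writing the single "value" field of the well-known StringValue message:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("old").ProtoReflect()
	fd := m.Descriptor().Fields().ByName("value")
	m.Set(fd, protoreflect.ValueOfString("new"))
	fmt.Println(m.Get(fd).String()) // new
}
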
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
index 591652541f2..654599d4493 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
@@ -24,19 +24,19 @@ import (
// Unlike the == operator, a NaN is equal to another NaN.
//
// - Enums are equal if they contain the same number.
-// Since Value does not contain an enum descriptor,
+// Since [Value] does not contain an enum descriptor,
// enum values do not consider the type of the enum.
//
// - Other scalar values are equal if they contain the same value.
//
-// - Message values are equal if they belong to the same message descriptor,
+// - [Message] values are equal if they belong to the same message descriptor,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
-// - Lists are equal if they are the same length and
+// - [List] values are equal if they are the same length and
// each corresponding element is equal.
//
-// - Maps are equal if they have the same set of keys and
+// - [Map] values are equal if they have the same set of keys and
// the corresponding value for each key is equal.
func (v1 Value) Equal(v2 Value) bool {
return equalValue(v1, v2)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index 08e5ef73fc0..1603097311e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -11,7 +11,7 @@ import (
// Value is a union where only one Go type may be set at a time.
// The Value is used to represent all possible values a field may take.
-// The following shows which Go type is used to represent each proto Kind:
+// The following shows which Go type is used to represent each proto [Kind]:
//
// ╔════════════╤═════════════════════════════════════╗
// ║ Go type │ Protobuf kind ║
@@ -31,22 +31,22 @@ import (
//
// Multiple protobuf Kinds may be represented by a single Go type if the type
// can losslessly represent the information for the proto kind. For example,
-// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64,
+// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64,
// but use different integer encoding methods.
//
-// The List or Map types are used if the field cardinality is repeated.
-// A field is a List if FieldDescriptor.IsList reports true.
-// A field is a Map if FieldDescriptor.IsMap reports true.
+// The [List] or [Map] types are used if the field cardinality is repeated.
+// A field is a [List] if [FieldDescriptor.IsList] reports true.
+// A field is a [Map] if [FieldDescriptor.IsMap] reports true.
//
// Converting to/from a Value and a concrete Go value panics on type mismatch.
-// For example, ValueOf("hello").Int() panics because this attempts to
+// For example, [ValueOf]("hello").Int() panics because this attempts to
// retrieve an int64 from a string.
//
-// List, Map, and Message Values are called "composite" values.
+// [List], [Map], and [Message] Values are called "composite" values.
//
// A composite Value may alias (reference) memory at some location,
// such that changes to the Value update that location.
-// A composite value acquired with a Mutable method, such as Message.Mutable,
+// A composite value acquired with a Mutable method, such as [Message.Mutable],
// always references the source object.
//
// For example:
@@ -65,7 +65,7 @@ import (
// // appending to the List here may or may not modify the message.
// list.Append(protoreflect.ValueOfInt32(0))
//
-// Some operations, such as Message.Get, may return an "empty, read-only"
+// Some operations, such as [Message.Get], may return an "empty, read-only"
// composite Value. Modifying an empty, read-only value panics.
type Value value
@@ -306,7 +306,7 @@ func (v Value) Float() float64 {
}
}
-// String returns v as a string. Since this method implements fmt.Stringer,
+// String returns v as a string. Since this method implements [fmt.Stringer],
// this returns the formatted string value for any non-string type.
func (v Value) String() string {
switch v.typ {
@@ -327,7 +327,7 @@ func (v Value) Bytes() []byte {
}
}
-// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber.
+// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber].
func (v Value) Enum() EnumNumber {
switch v.typ {
case enumType:
@@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber {
}
}
-// Message returns v as a Message and panics if the type is not a Message.
+// Message returns v as a [Message] and panics if the type is not a [Message].
func (v Value) Message() Message {
switch vi := v.getIface().(type) {
case Message:
@@ -347,7 +347,7 @@ func (v Value) Message() Message {
}
}
-// List returns v as a List and panics if the type is not a List.
+// List returns v as a [List] and panics if the type is not a [List].
func (v Value) List() List {
switch vi := v.getIface().(type) {
case List:
@@ -357,7 +357,7 @@ func (v Value) List() List {
}
}
-// Map returns v as a Map and panics if the type is not a Map.
+// Map returns v as a [Map] and panics if the type is not a [Map].
func (v Value) Map() Map {
switch vi := v.getIface().(type) {
case Map:
@@ -367,7 +367,7 @@ func (v Value) Map() Map {
}
}
-// MapKey returns v as a MapKey and panics for invalid MapKey types.
+// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types.
func (v Value) MapKey() MapKey {
switch v.typ {
case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType:
@@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey {
}
// MapKey is used to index maps, where the Go type of the MapKey must match
-// the specified key Kind (see MessageDescriptor.IsMapEntry).
-// The following shows what Go type is used to represent each proto Kind:
+// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]).
+// The following shows what Go type is used to represent each proto [Kind]:
//
// ╔═════════╤═════════════════════════════════════╗
// ║ Go type │ Protobuf kind ║
@@ -392,13 +392,13 @@ func (v Value) MapKey() MapKey {
// ║ string │ StringKind ║
// ╚═════════╧═════════════════════════════════════╝
//
-// A MapKey is constructed and accessed through a Value:
+// A MapKey is constructed and accessed through a [Value]:
//
// k := ValueOf("hash").MapKey() // convert string to MapKey
// s := k.String() // convert MapKey to string
//
-// The MapKey is a strict subset of valid types used in Value;
-// converting a Value to a MapKey with an invalid type panics.
+// The MapKey is a strict subset of valid types used in [Value];
+// converting a [Value] to a MapKey with an invalid type panics.
type MapKey value
// IsValid reports whether k is populated with a value.
@@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 {
return Value(k).Uint()
}
-// String returns k as a string. Since this method implements fmt.Stringer,
+// String returns k as a string. Since this method implements [fmt.Stringer],
// this returns the formatted string value for any non-string type.
func (k MapKey) String() string {
return Value(k).String()
}
-// Value returns k as a Value.
+// Value returns k as a [Value].
func (k MapKey) Value() Value {
return Value(k)
}
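The MapKey/Value round trip described in the comments above can be exercised directly; a minimal sketch using only the conversions the documentation names:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Construct a MapKey from a string Value, then convert it back.
	k := protoreflect.ValueOfString("hash").MapKey()
	fmt.Println(k.String())            // "hash"
	fmt.Println(k.Value().Interface()) // "hash" again, as a Value

	// Type mismatches panic: ValueOfString("hello").Int() would panic,
	// because it asks for an int64 from a string-typed Value.
}
```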
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
similarity index 97%
rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index 702ddf22a27..b1fdbe3e8e1 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
+//go:build !purego && !appengine && !go1.21
+// +build !purego,!appengine,!go1.21
package protoreflect
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
new file mode 100644
index 00000000000..43547011173
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -0,0 +1,87 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !purego && !appengine && go1.21
+// +build !purego,!appengine,go1.21
+
+package protoreflect
+
+import (
+ "unsafe"
+
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+type (
+ ifaceHeader struct {
+ _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+ Type unsafe.Pointer
+ Data unsafe.Pointer
+ }
+)
+
+var (
+ nilType = typeOf(nil)
+ boolType = typeOf(*new(bool))
+ int32Type = typeOf(*new(int32))
+ int64Type = typeOf(*new(int64))
+ uint32Type = typeOf(*new(uint32))
+ uint64Type = typeOf(*new(uint64))
+ float32Type = typeOf(*new(float32))
+ float64Type = typeOf(*new(float64))
+ stringType = typeOf(*new(string))
+ bytesType = typeOf(*new([]byte))
+ enumType = typeOf(*new(EnumNumber))
+)
+
+// typeOf returns a pointer to the Go type information.
+// The pointer is comparable and equal if and only if the types are identical.
+func typeOf(t interface{}) unsafe.Pointer {
+ return (*ifaceHeader)(unsafe.Pointer(&t)).Type
+}
+
+// value is a union where only one type can be represented at a time.
+// The struct is 24B large on 64-bit systems and requires the minimum storage
+// necessary to represent each possible type.
+//
+// The Go GC needs to be able to scan variables containing pointers.
+// As such, pointers and non-pointers cannot be intermixed.
+type value struct {
+ pragma.DoNotCompare // 0B
+
+ // typ stores the type of the value as a pointer to the Go type.
+ typ unsafe.Pointer // 8B
+
+ // ptr stores the data pointer for a String, Bytes, or interface value.
+ ptr unsafe.Pointer // 8B
+
+ // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
+ // Enum value as a raw uint64.
+ //
+ // It is also used to store the length of a String or Bytes value;
+ // the capacity is ignored.
+ num uint64 // 8B
+}
+
+func valueOfString(v string) Value {
+ return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))}
+}
+func valueOfBytes(v []byte) Value {
+ return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
+}
+func valueOfIface(v interface{}) Value {
+ p := (*ifaceHeader)(unsafe.Pointer(&v))
+ return Value{typ: p.Type, ptr: p.Data}
+}
+
+func (v Value) getString() string {
+ return unsafe.String((*byte)(v.ptr), v.num)
+}
+func (v Value) getBytes() []byte {
+ return unsafe.Slice((*byte)(v.ptr), v.num)
+}
+func (v Value) getIface() (x interface{}) {
+ *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
+ return x
+}
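The new go1.21 file above stores strings and byte slices as a raw (pointer, length) pair using the unsafe.StringData/unsafe.String and unsafe.SliceData/unsafe.Slice helpers added in Go 1.20. A minimal, self-contained sketch of that round trip (not from this repository):

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := "protoreflect"

	// Split the string into its raw data pointer and length,
	// mirroring what valueOfString does above.
	ptr := unsafe.StringData(s)
	n := len(s)

	// Rebuild the string header without copying, as getString does.
	round := unsafe.String(ptr, n)
	fmt.Println(round == s) // true
}
```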
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index aeb55977446..6267dc52a67 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -5,12 +5,12 @@
// Package protoregistry provides data structures to register and lookup
// protobuf descriptor types.
//
-// The Files registry contains file descriptors and provides the ability
+// The [Files] registry contains file descriptors and provides the ability
// to iterate over the files or lookup a specific descriptor within the files.
-// Files only contains protobuf descriptors and has no understanding of Go
+// [Files] only contains protobuf descriptors and has no understanding of Go
// type information that may be associated with each descriptor.
//
-// The Types registry contains descriptor types for which there is a known
+// The [Types] registry contains descriptor types for which there is a known
// Go type associated with that descriptor. It provides the ability to iterate
// over the registered types or lookup a type by name.
package protoregistry
@@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) {
// FindDescriptorByName looks up a descriptor by the full name.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
if r == nil {
return nil, NotFound
@@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) {
// FindFileByPath looks up a file by the path.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
// This returns an error if multiple files have the same path.
func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
if r == nil {
@@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec
// A compliant implementation must deterministically return the same type
// if no error is encountered.
//
-// The Types type implements this interface.
+// The [Types] type implements this interface.
type MessageTypeResolver interface {
// FindMessageByName looks up a message by its full name.
// E.g., "google.protobuf.Any"
@@ -451,7 +451,7 @@ type MessageTypeResolver interface {
// A compliant implementation must deterministically return the same type
// if no error is encountered.
//
-// The Types type implements this interface.
+// The [Types] type implements this interface.
type ExtensionTypeResolver interface {
// FindExtensionByName looks up an extension field by the field's full name.
// Note that this is the full name of the field as determined by
@@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac
// FindEnumByName looks up an enum by its full name.
// E.g., "google.protobuf.Field.Kind".
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) {
if r == nil {
return nil, NotFound
@@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp
// FindMessageByName looks up a message by its full name,
// e.g. "google.protobuf.Any".
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
if r == nil {
return nil, NotFound
@@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M
// FindMessageByURL looks up a message by a URL identifier.
// See documentation on google.protobuf.Any.type_url for the URL format.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
// This function is similar to FindMessageByName but
// truncates anything before and including '/' in the URL.
@@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
// where the extension is declared and is unrelated to the full name of the
// message being extended.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
if r == nil {
return nil, NotFound
@@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E
// FindExtensionByNumber looks up an extension field by the field number
// within some parent message, identified by full name.
//
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
if r == nil {
return nil, NotFound
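The NotFound convention documented throughout this file can be handled uniformly by callers; a hedged sketch against the global registry (the message name is only an illustration and may or may not be linked into a given binary):

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

func main() {
	name := protoreflect.FullName("google.protobuf.Empty") // illustrative name
	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
	switch {
	case errors.Is(err, protoregistry.NotFound):
		fmt.Println("message type not registered")
	case err != nil:
		fmt.Println("lookup failed:", err)
	default:
		fmt.Println("found:", mt.Descriptor().FullName())
	}
}
```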
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 04c00f737c1..38daa858d07 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -48,6 +48,94 @@ import (
sync "sync"
)
+// The full set of known editions.
+type Edition int32
+
+const (
+ // A placeholder for an unknown edition value.
+ Edition_EDITION_UNKNOWN Edition = 0
+ // Legacy syntax "editions". These pre-date editions, but behave much like
+ // distinct editions. These can't be used to specify the edition of proto
+ // files, but feature definitions must supply proto2/proto3 defaults for
+ // backwards compatibility.
+ Edition_EDITION_PROTO2 Edition = 998
+ Edition_EDITION_PROTO3 Edition = 999
+ // Editions that have been released. The specific values are arbitrary and
+ // should not be depended on, but they will always be time-ordered for easy
+ // comparison.
+ Edition_EDITION_2023 Edition = 1000
+ // Placeholder editions for testing feature resolution. These should not be
+ // used or relied on outside of tests.
+ Edition_EDITION_1_TEST_ONLY Edition = 1
+ Edition_EDITION_2_TEST_ONLY Edition = 2
+ Edition_EDITION_99997_TEST_ONLY Edition = 99997
+ Edition_EDITION_99998_TEST_ONLY Edition = 99998
+ Edition_EDITION_99999_TEST_ONLY Edition = 99999
+)
+
+// Enum value maps for Edition.
+var (
+ Edition_name = map[int32]string{
+ 0: "EDITION_UNKNOWN",
+ 998: "EDITION_PROTO2",
+ 999: "EDITION_PROTO3",
+ 1000: "EDITION_2023",
+ 1: "EDITION_1_TEST_ONLY",
+ 2: "EDITION_2_TEST_ONLY",
+ 99997: "EDITION_99997_TEST_ONLY",
+ 99998: "EDITION_99998_TEST_ONLY",
+ 99999: "EDITION_99999_TEST_ONLY",
+ }
+ Edition_value = map[string]int32{
+ "EDITION_UNKNOWN": 0,
+ "EDITION_PROTO2": 998,
+ "EDITION_PROTO3": 999,
+ "EDITION_2023": 1000,
+ "EDITION_1_TEST_ONLY": 1,
+ "EDITION_2_TEST_ONLY": 2,
+ "EDITION_99997_TEST_ONLY": 99997,
+ "EDITION_99998_TEST_ONLY": 99998,
+ "EDITION_99999_TEST_ONLY": 99999,
+ }
+)
+
+func (x Edition) Enum() *Edition {
+ p := new(Edition)
+ *p = x
+ return p
+}
+
+func (x Edition) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Edition) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor()
+}
+
+func (Edition) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[0]
+}
+
+func (x Edition) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *Edition) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = Edition(num)
+ return nil
+}
+
+// Deprecated: Use Edition.Descriptor instead.
+func (Edition) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
+}
+
// The verification state of the extension range.
type ExtensionRangeOptions_VerificationState int32
@@ -80,11 +168,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
}
func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
}
func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[0]
+ return &file_google_protobuf_descriptor_proto_enumTypes[1]
}
func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -125,9 +213,10 @@ const (
FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
// Tag-delimited aggregate.
- // Group type is deprecated and not supported in proto3. However, Proto3
+ // Group type is deprecated and not supported after google.protobuf. However, Proto3
// implementations should still be able to parse the group wire format and
- // treat group fields as unknown fields.
+ // treat group fields as unknown fields. In Editions, the group wire format
+ // can be enabled via the `message_encoding` feature.
FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate.
// New in version 2.
@@ -195,11 +284,11 @@ func (x FieldDescriptorProto_Type) String() string {
}
func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
}
func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[1]
+ return &file_google_protobuf_descriptor_proto_enumTypes[2]
}
func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -226,21 +315,24 @@ type FieldDescriptorProto_Label int32
const (
// 0 is reserved for errors
FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
- FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+ // The required label is only allowed in google.protobuf. In proto3 and Editions
+ // it's explicitly prohibited. In Editions, the `field_presence` feature
+ // can be used to get this behavior.
+ FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
)
// Enum value maps for FieldDescriptorProto_Label.
var (
FieldDescriptorProto_Label_name = map[int32]string{
1: "LABEL_OPTIONAL",
- 2: "LABEL_REQUIRED",
3: "LABEL_REPEATED",
+ 2: "LABEL_REQUIRED",
}
FieldDescriptorProto_Label_value = map[string]int32{
"LABEL_OPTIONAL": 1,
- "LABEL_REQUIRED": 2,
"LABEL_REPEATED": 3,
+ "LABEL_REQUIRED": 2,
}
)
@@ -255,11 +347,11 @@ func (x FieldDescriptorProto_Label) String() string {
}
func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
}
func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[2]
+ return &file_google_protobuf_descriptor_proto_enumTypes[3]
}
func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -316,11 +408,11 @@ func (x FileOptions_OptimizeMode) String() string {
}
func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
}
func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[3]
+ return &file_google_protobuf_descriptor_proto_enumTypes[4]
}
func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -382,11 +474,11 @@ func (x FieldOptions_CType) String() string {
}
func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
}
func (FieldOptions_CType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[4]
+ return &file_google_protobuf_descriptor_proto_enumTypes[5]
}
func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -444,11 +536,11 @@ func (x FieldOptions_JSType) String() string {
}
func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
}
func (FieldOptions_JSType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[5]
+ return &file_google_protobuf_descriptor_proto_enumTypes[6]
}
func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -506,11 +598,11 @@ func (x FieldOptions_OptionRetention) String() string {
}
func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
}
func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[6]
+ return &file_google_protobuf_descriptor_proto_enumTypes[7]
}
func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -590,11 +682,11 @@ func (x FieldOptions_OptionTargetType) String() string {
}
func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
}
func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[7]
+ return &file_google_protobuf_descriptor_proto_enumTypes[8]
}
func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -652,11 +744,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
}
func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
}
func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[8]
+ return &file_google_protobuf_descriptor_proto_enumTypes[9]
}
func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -678,6 +770,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0}
}
+type FeatureSet_FieldPresence int32
+
+const (
+ FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0
+ FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1
+ FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2
+ FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3
+)
+
+// Enum value maps for FeatureSet_FieldPresence.
+var (
+ FeatureSet_FieldPresence_name = map[int32]string{
+ 0: "FIELD_PRESENCE_UNKNOWN",
+ 1: "EXPLICIT",
+ 2: "IMPLICIT",
+ 3: "LEGACY_REQUIRED",
+ }
+ FeatureSet_FieldPresence_value = map[string]int32{
+ "FIELD_PRESENCE_UNKNOWN": 0,
+ "EXPLICIT": 1,
+ "IMPLICIT": 2,
+ "LEGACY_REQUIRED": 3,
+ }
+)
+
+func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence {
+ p := new(FeatureSet_FieldPresence)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_FieldPresence) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+}
+
+func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[10]
+}
+
+func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_FieldPresence(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead.
+func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
+type FeatureSet_EnumType int32
+
+const (
+ FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0
+ FeatureSet_OPEN FeatureSet_EnumType = 1
+ FeatureSet_CLOSED FeatureSet_EnumType = 2
+)
+
+// Enum value maps for FeatureSet_EnumType.
+var (
+ FeatureSet_EnumType_name = map[int32]string{
+ 0: "ENUM_TYPE_UNKNOWN",
+ 1: "OPEN",
+ 2: "CLOSED",
+ }
+ FeatureSet_EnumType_value = map[string]int32{
+ "ENUM_TYPE_UNKNOWN": 0,
+ "OPEN": 1,
+ "CLOSED": 2,
+ }
+)
+
+func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType {
+ p := new(FeatureSet_EnumType)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_EnumType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+}
+
+func (FeatureSet_EnumType) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[11]
+}
+
+func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_EnumType(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_EnumType.Descriptor instead.
+func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1}
+}
+
+type FeatureSet_RepeatedFieldEncoding int32
+
+const (
+ FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0
+ FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1
+ FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2
+)
+
+// Enum value maps for FeatureSet_RepeatedFieldEncoding.
+var (
+ FeatureSet_RepeatedFieldEncoding_name = map[int32]string{
+ 0: "REPEATED_FIELD_ENCODING_UNKNOWN",
+ 1: "PACKED",
+ 2: "EXPANDED",
+ }
+ FeatureSet_RepeatedFieldEncoding_value = map[string]int32{
+ "REPEATED_FIELD_ENCODING_UNKNOWN": 0,
+ "PACKED": 1,
+ "EXPANDED": 2,
+ }
+)
+
+func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding {
+ p := new(FeatureSet_RepeatedFieldEncoding)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_RepeatedFieldEncoding) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+}
+
+func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[12]
+}
+
+func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_RepeatedFieldEncoding(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead.
+func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2}
+}
+
+type FeatureSet_Utf8Validation int32
+
+const (
+ FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0
+ FeatureSet_NONE FeatureSet_Utf8Validation = 1
+ FeatureSet_VERIFY FeatureSet_Utf8Validation = 2
+)
+
+// Enum value maps for FeatureSet_Utf8Validation.
+var (
+ FeatureSet_Utf8Validation_name = map[int32]string{
+ 0: "UTF8_VALIDATION_UNKNOWN",
+ 1: "NONE",
+ 2: "VERIFY",
+ }
+ FeatureSet_Utf8Validation_value = map[string]int32{
+ "UTF8_VALIDATION_UNKNOWN": 0,
+ "NONE": 1,
+ "VERIFY": 2,
+ }
+)
+
+func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation {
+ p := new(FeatureSet_Utf8Validation)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_Utf8Validation) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+}
+
+func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[13]
+}
+
+func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_Utf8Validation(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead.
+func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3}
+}
+
+type FeatureSet_MessageEncoding int32
+
+const (
+ FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0
+ FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1
+ FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2
+)
+
+// Enum value maps for FeatureSet_MessageEncoding.
+var (
+ FeatureSet_MessageEncoding_name = map[int32]string{
+ 0: "MESSAGE_ENCODING_UNKNOWN",
+ 1: "LENGTH_PREFIXED",
+ 2: "DELIMITED",
+ }
+ FeatureSet_MessageEncoding_value = map[string]int32{
+ "MESSAGE_ENCODING_UNKNOWN": 0,
+ "LENGTH_PREFIXED": 1,
+ "DELIMITED": 2,
+ }
+)
+
+func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding {
+ p := new(FeatureSet_MessageEncoding)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_MessageEncoding) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+}
+
+func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[14]
+}
+
+func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_MessageEncoding(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead.
+func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4}
+}
+
+type FeatureSet_JsonFormat int32
+
+const (
+ FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0
+ FeatureSet_ALLOW FeatureSet_JsonFormat = 1
+ FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2
+)
+
+// Enum value maps for FeatureSet_JsonFormat.
+var (
+ FeatureSet_JsonFormat_name = map[int32]string{
+ 0: "JSON_FORMAT_UNKNOWN",
+ 1: "ALLOW",
+ 2: "LEGACY_BEST_EFFORT",
+ }
+ FeatureSet_JsonFormat_value = map[string]int32{
+ "JSON_FORMAT_UNKNOWN": 0,
+ "ALLOW": 1,
+ "LEGACY_BEST_EFFORT": 2,
+ }
+)
+
+func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat {
+ p := new(FeatureSet_JsonFormat)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_JsonFormat) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+}
+
+func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[15]
+}
+
+func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_JsonFormat(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead.
+func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
+}
+
// Represents the identified object's effect on the element in the original
// .proto file.
type GeneratedCodeInfo_Annotation_Semantic int32
@@ -716,11 +1165,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
}
func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
}
func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[9]
+ return &file_google_protobuf_descriptor_proto_enumTypes[16]
}
func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -739,7 +1188,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error {
// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead.
func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0}
}
// The protocol compiler can output a FileDescriptorSet containing the .proto
@@ -822,8 +1271,8 @@ type FileDescriptorProto struct {
//
// If `edition` is present, this value must be "editions".
Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
- // The edition of the proto file, which is an opaque string.
- Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"`
+ // The edition of the proto file.
+ Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
}
func (x *FileDescriptorProto) Reset() {
@@ -942,11 +1391,11 @@ func (x *FileDescriptorProto) GetSyntax() string {
return ""
}
-func (x *FileDescriptorProto) GetEdition() string {
+func (x *FileDescriptorProto) GetEdition() Edition {
if x != nil && x.Edition != nil {
return *x.Edition
}
- return ""
+ return Edition_EDITION_UNKNOWN
}
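The hunk above changes FileDescriptorProto.Edition from an opaque string to the new Edition enum, so callers that previously compared strings now compare enum values. A minimal sketch of reading the field after this bump (the helper name is hypothetical):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// describeSyntax is a hypothetical helper: GetEdition now returns
// descriptorpb.Edition rather than a string.
func describeSyntax(fd *descriptorpb.FileDescriptorProto) string {
	if fd.GetSyntax() == "editions" {
		return fd.GetEdition().String() // e.g. "EDITION_2023"
	}
	return fd.GetSyntax()
}

func main() {
	fd := &descriptorpb.FileDescriptorProto{
		Syntax:  proto.String("editions"),
		Edition: descriptorpb.Edition_EDITION_2023.Enum(),
	}
	fmt.Println(describeSyntax(fd)) // EDITION_2023
}
```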
// Describes a message type.
@@ -1079,13 +1528,14 @@ type ExtensionRangeOptions struct {
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- // go/protobuf-stripping-extension-declarations
- // Like Metadata, but we use a repeated field to hold all extension
- // declarations. This should avoid the size increases of transforming a large
- // extension range into small ranges in generated binaries.
+ // For external users: DO NOT USE. We are in the process of open sourcing
+ // extension declaration and executing internal cleanups before it can be
+ // used externally.
Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"`
+ // Any features defined in the specific edition.
+ Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
// The verification state of the range.
- // TODO(b/278783756): flip the default to DECLARATION once all empty ranges
+ // TODO: flip the default to DECLARATION once all empty ranges
// are marked as UNVERIFIED.
Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
}
@@ -1141,6 +1591,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar
return nil
}
+func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState {
if x != nil && x.Verification != nil {
return *x.Verification
@@ -1772,6 +2229,8 @@ type FileOptions struct {
// is empty. When this option is not set, the package name will be used for
// determining the ruby package.
RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
+ // Any features defined in the specific edition.
+ Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -1963,6 +2422,13 @@ func (x *FileOptions) GetRubyPackage() string {
return ""
}
+func (x *FileOptions) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2039,11 +2505,13 @@ type MessageOptions struct {
// This should only be used as a temporary measure against broken builds due
// to the change in behavior for JSON field name conflicts.
//
- // TODO(b/261750190) This is legacy behavior we plan to remove once downstream
+ // TODO This is legacy behavior we plan to remove once downstream
// teams have had time to migrate.
//
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
+ // Any features defined in the specific edition.
+ Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2123,6 +2591,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool {
return false
}
+func (x *MessageOptions) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2147,7 +2622,9 @@ type FieldOptions struct {
// a more efficient representation on the wire. Rather than repeatedly
// writing the tag and type for each element, the entire array is encoded as
// a single length-delimited blob. In proto3, only explicit setting it to
- // false will avoid using packed encoding.
+ // false will avoid using packed encoding. This option is prohibited in
+ // Editions, but the `repeated_field_encoding` feature can be used to control
+ // the behavior.
Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
// The jstype option determines the JavaScript type used for values of the
// field. The option is permitted only for 64 bit integral and fixed types
@@ -2205,11 +2682,12 @@ type FieldOptions struct {
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
// Indicate that the field value should not be printed out when using debug
// formats, e.g. when the field contains sensitive credentials.
- DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
- Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"`
- // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
- Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"`
- Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
+ DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
+ Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"`
+ Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
+ EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
+ // Any features defined in the specific edition.
+ Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2320,17 +2798,23 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention {
return FieldOptions_RETENTION_UNKNOWN
}
-// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
-func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType {
- if x != nil && x.Target != nil {
- return *x.Target
+func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType {
+ if x != nil {
+ return x.Targets
}
- return FieldOptions_TARGET_TYPE_UNKNOWN
+ return nil
}
-func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType {
+func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault {
if x != nil {
- return x.Targets
+ return x.EditionDefaults
+ }
+ return nil
+}
+
+func (x *FieldOptions) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
}
return nil
}
@@ -2348,6 +2832,8 @@ type OneofOptions struct {
unknownFields protoimpl.UnknownFields
extensionFields protoimpl.ExtensionFields
+ // Any features defined in the specific edition.
+ Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2384,6 +2870,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13}
}
+func (x *OneofOptions) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2409,11 +2902,13 @@ type EnumOptions struct {
// and strips underscores from the fields before comparison in proto3 only.
// The new behavior takes `json_name` into account and applies to proto2 as
// well.
- // TODO(b/261750190) Remove this legacy behavior once downstream teams have
+ // TODO Remove this legacy behavior once downstream teams have
// had time to migrate.
//
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
+ // Any features defined in the specific edition.
+ Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2477,6 +2972,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool {
return false
}
+func (x *EnumOptions) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2495,13 +2997,20 @@ type EnumValueOptions struct {
// for the enum value, or it will be completely ignored; in the very least,
// this is a formalization for deprecating enum values.
Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Any features defined in the specific edition.
+ Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
+ // Indicate that fields annotated with this enum value should not be printed
+ // out when using debug formats, e.g. when the field contains sensitive
+ // credentials.
+ DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
// Default values for EnumValueOptions fields.
const (
- Default_EnumValueOptions_Deprecated = bool(false)
+ Default_EnumValueOptions_Deprecated = bool(false)
+ Default_EnumValueOptions_DebugRedact = bool(false)
)
func (x *EnumValueOptions) Reset() {
@@ -2543,6 +3052,20 @@ func (x *EnumValueOptions) GetDeprecated() bool {
return Default_EnumValueOptions_Deprecated
}
+func (x *EnumValueOptions) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
+func (x *EnumValueOptions) GetDebugRedact() bool {
+ if x != nil && x.DebugRedact != nil {
+ return *x.DebugRedact
+ }
+ return Default_EnumValueOptions_DebugRedact
+}
+
func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2556,6 +3079,8 @@ type ServiceOptions struct {
unknownFields protoimpl.UnknownFields
extensionFields protoimpl.ExtensionFields
+ // Any features defined in the specific edition.
+ Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
// Is this service deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the service, or it will be completely ignored; in the very least,
@@ -2602,6 +3127,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16}
}
+func (x *ServiceOptions) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
func (x *ServiceOptions) GetDeprecated() bool {
if x != nil && x.Deprecated != nil {
return *x.Deprecated
@@ -2628,6 +3160,8 @@ type MethodOptions struct {
// this is a formalization for deprecating methods.
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+ // Any features defined in the specific edition.
+ Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2684,6 +3218,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
return Default_MethodOptions_IdempotencyLevel
}
+func (x *MethodOptions) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2763,35 +3304,200 @@ func (x *UninterpretedOption) GetPositiveIntValue() uint64 {
if x != nil && x.PositiveIntValue != nil {
return *x.PositiveIntValue
}
- return 0
+ return 0
+}
+
+func (x *UninterpretedOption) GetNegativeIntValue() int64 {
+ if x != nil && x.NegativeIntValue != nil {
+ return *x.NegativeIntValue
+ }
+ return 0
+}
+
+func (x *UninterpretedOption) GetDoubleValue() float64 {
+ if x != nil && x.DoubleValue != nil {
+ return *x.DoubleValue
+ }
+ return 0
+}
+
+func (x *UninterpretedOption) GetStringValue() []byte {
+ if x != nil {
+ return x.StringValue
+ }
+ return nil
+}
+
+func (x *UninterpretedOption) GetAggregateValue() string {
+ if x != nil && x.AggregateValue != nil {
+ return *x.AggregateValue
+ }
+ return ""
+}
+
+// TODO Enums in C++ gencode (and potentially other languages) are
+// not well scoped. This means that each of the feature enums below can clash
+// with each other. The short names we've chosen maximize call-site
+// readability, but leave us very open to this scenario. A future feature will
+// be designed and implemented to handle this, hopefully before we ever hit a
+// conflict here.
+type FeatureSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+ extensionFields protoimpl.ExtensionFields
+
+ FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+ EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+ RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+ Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+ MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+ JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+}
+
+func (x *FeatureSet) Reset() {
+ *x = FeatureSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FeatureSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet) ProtoMessage() {}
+
+func (x *FeatureSet) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead.
+func (*FeatureSet) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence {
+ if x != nil && x.FieldPresence != nil {
+ return *x.FieldPresence
+ }
+ return FeatureSet_FIELD_PRESENCE_UNKNOWN
+}
+
+func (x *FeatureSet) GetEnumType() FeatureSet_EnumType {
+ if x != nil && x.EnumType != nil {
+ return *x.EnumType
+ }
+ return FeatureSet_ENUM_TYPE_UNKNOWN
+}
+
+func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding {
+ if x != nil && x.RepeatedFieldEncoding != nil {
+ return *x.RepeatedFieldEncoding
+ }
+ return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN
+}
+
+func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation {
+ if x != nil && x.Utf8Validation != nil {
+ return *x.Utf8Validation
+ }
+ return FeatureSet_UTF8_VALIDATION_UNKNOWN
+}
+
+func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding {
+ if x != nil && x.MessageEncoding != nil {
+ return *x.MessageEncoding
+ }
+ return FeatureSet_MESSAGE_ENCODING_UNKNOWN
+}
+
+func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
+ if x != nil && x.JsonFormat != nil {
+ return *x.JsonFormat
+ }
+ return FeatureSet_JSON_FORMAT_UNKNOWN
+}
+
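The getters above give read access to the per-edition features; a small hedged sketch constructing a FeatureSet by hand and reading it back through the new API (the field choices are arbitrary):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fs := &descriptorpb.FeatureSet{
		FieldPresence: descriptorpb.FeatureSet_IMPLICIT.Enum(),
		EnumType:      descriptorpb.FeatureSet_OPEN.Enum(),
	}
	fmt.Println(fs.GetFieldPresence()) // IMPLICIT
	fmt.Println(fs.GetEnumType())      // OPEN
	// Unset fields fall back to the *_UNKNOWN zero value.
	fmt.Println(fs.GetJsonFormat()) // JSON_FORMAT_UNKNOWN
}
```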
+// A compiled specification for the defaults of a set of features. These
+// messages are generated from FeatureSet extensions and can be used to seed
+// feature resolution. The resolution with this object becomes a simple search
+// for the closest matching edition, followed by proto merges.
+type FeatureSetDefaults struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"`
+ // The minimum supported edition (inclusive) when this was constructed.
+ // Editions before this will not have defaults.
+ MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"`
+ // The maximum known edition (inclusive) when this was constructed. Editions
+ // after this will not have reliable defaults.
+ MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"`
+}
+
+func (x *FeatureSetDefaults) Reset() {
+ *x = FeatureSetDefaults{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (x *UninterpretedOption) GetNegativeIntValue() int64 {
- if x != nil && x.NegativeIntValue != nil {
- return *x.NegativeIntValue
- }
- return 0
+func (x *FeatureSetDefaults) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (x *UninterpretedOption) GetDoubleValue() float64 {
- if x != nil && x.DoubleValue != nil {
- return *x.DoubleValue
+func (*FeatureSetDefaults) ProtoMessage() {}
+
+func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (x *UninterpretedOption) GetStringValue() []byte {
+// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead.
+func (*FeatureSetDefaults) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault {
if x != nil {
- return x.StringValue
+ return x.Defaults
}
return nil
}
-func (x *UninterpretedOption) GetAggregateValue() string {
- if x != nil && x.AggregateValue != nil {
- return *x.AggregateValue
+func (x *FeatureSetDefaults) GetMinimumEdition() Edition {
+ if x != nil && x.MinimumEdition != nil {
+ return *x.MinimumEdition
}
- return ""
+ return Edition_EDITION_UNKNOWN
+}
+
+func (x *FeatureSetDefaults) GetMaximumEdition() Edition {
+ if x != nil && x.MaximumEdition != nil {
+ return *x.MaximumEdition
+ }
+ return Edition_EDITION_UNKNOWN
}
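// Editorial sketch, not part of the vendored diff: the FeatureSetDefaults
// comments above describe feature resolution as a search for the closest
// matching edition followed by proto merges. The snippet below illustrates
// that lookup against the generated types added in this diff; the package and
// helper names are hypothetical, and any explicitly-set features would be
// layered on top of the returned defaults with proto.Merge.
package featuresketch

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// resolveFeatureDefaults returns the default FeatureSet for the requested edition.
func resolveFeatureDefaults(d *descriptorpb.FeatureSetDefaults, ed descriptorpb.Edition) (*descriptorpb.FeatureSet, error) {
	if ed < d.GetMinimumEdition() || ed > d.GetMaximumEdition() {
		return nil, fmt.Errorf("edition %v is outside the supported range", ed)
	}
	var base *descriptorpb.FeatureSet
	// Entries are in strict ascending order by edition, so the last entry at
	// or before the target edition carries the defaults to use.
	for _, def := range d.GetDefaults() {
		if def.GetEdition() > ed {
			break
		}
		base = def.GetFeatures()
	}
	if base == nil {
		return nil, fmt.Errorf("no defaults at or before edition %v", ed)
	}
	return proto.Clone(base).(*descriptorpb.FeatureSet), nil
}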
// Encapsulates information about the original source file from which a
@@ -2855,7 +3561,7 @@ type SourceCodeInfo struct {
func (x *SourceCodeInfo) Reset() {
*x = SourceCodeInfo{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2868,7 +3574,7 @@ func (x *SourceCodeInfo) String() string {
func (*SourceCodeInfo) ProtoMessage() {}
func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2881,7 +3587,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead.
func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21}
}
func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
@@ -2907,7 +3613,7 @@ type GeneratedCodeInfo struct {
func (x *GeneratedCodeInfo) Reset() {
*x = GeneratedCodeInfo{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2920,7 +3626,7 @@ func (x *GeneratedCodeInfo) String() string {
func (*GeneratedCodeInfo) ProtoMessage() {}
func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2933,7 +3639,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead.
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22}
}
func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
@@ -2956,7 +3662,7 @@ type DescriptorProto_ExtensionRange struct {
func (x *DescriptorProto_ExtensionRange) Reset() {
*x = DescriptorProto_ExtensionRange{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2969,7 +3675,7 @@ func (x *DescriptorProto_ExtensionRange) String() string {
func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3021,7 +3727,7 @@ type DescriptorProto_ReservedRange struct {
func (x *DescriptorProto_ReservedRange) Reset() {
*x = DescriptorProto_ReservedRange{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3034,7 +3740,7 @@ func (x *DescriptorProto_ReservedRange) String() string {
func (*DescriptorProto_ReservedRange) ProtoMessage() {}
func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3078,10 +3784,6 @@ type ExtensionRangeOptions_Declaration struct {
// Metadata.type, Declaration.type must have a leading dot for messages
// and enums.
Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"`
- // Deprecated. Please use "repeated".
- //
- // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
- IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"`
// If true, indicates that the number is reserved in the extension range,
// and any extension field with the number will fail to compile. Set this
// when a declared extension field is deleted.
@@ -3094,7 +3796,7 @@ type ExtensionRangeOptions_Declaration struct {
func (x *ExtensionRangeOptions_Declaration) Reset() {
*x = ExtensionRangeOptions_Declaration{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3107,7 +3809,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string {
func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3144,14 +3846,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string {
return ""
}
-// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
-func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool {
- if x != nil && x.IsRepeated != nil {
- return *x.IsRepeated
- }
- return false
-}
-
func (x *ExtensionRangeOptions_Declaration) GetReserved() bool {
if x != nil && x.Reserved != nil {
return *x.Reserved
@@ -3184,7 +3878,7 @@ type EnumDescriptorProto_EnumReservedRange struct {
func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
*x = EnumDescriptorProto_EnumReservedRange{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3197,7 +3891,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string {
func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3227,6 +3921,61 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
return 0
}
+type FieldOptions_EditionDefault struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
+}
+
+func (x *FieldOptions_EditionDefault) Reset() {
+ *x = FieldOptions_EditionDefault{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldOptions_EditionDefault) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldOptions_EditionDefault) ProtoMessage() {}
+
+func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead.
+func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0}
+}
+
+func (x *FieldOptions_EditionDefault) GetEdition() Edition {
+ if x != nil && x.Edition != nil {
+ return *x.Edition
+ }
+ return Edition_EDITION_UNKNOWN
+}
+
+func (x *FieldOptions_EditionDefault) GetValue() string {
+ if x != nil && x.Value != nil {
+ return *x.Value
+ }
+ return ""
+}
+
// The name of the uninterpreted option. Each string represents a segment in
// a dot-separated name. is_extension is true iff a segment represents an
// extension (denoted with parentheses in options specs in .proto files).
@@ -3244,7 +3993,7 @@ type UninterpretedOption_NamePart struct {
func (x *UninterpretedOption_NamePart) Reset() {
*x = UninterpretedOption_NamePart{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3257,7 +4006,7 @@ func (x *UninterpretedOption_NamePart) String() string {
func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3287,6 +4036,65 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
return false
}
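// Editorial sketch, continuing the hypothetical featuresketch example above:
// the NamePart comment describes how a dot-separated option name is split into
// segments, with is_extension marking the parenthesized segments. The option
// name "(my.opt).field" in a .proto file would therefore be carried as the two
// segments below; the identifiers are made up for illustration.
var exampleOptionName = []*descriptorpb.UninterpretedOption_NamePart{
	{NamePart: proto.String("my.opt"), IsExtension: proto.Bool(true)},
	{NamePart: proto.String("field"), IsExtension: proto.Bool(false)},
}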
+// A map from every known edition with a unique set of defaults to its
+// defaults. Not all editions may be contained here. For a given edition,
+// the defaults at the closest matching edition ordered at or before it should
+// be used. This field must be in strict ascending order by edition.
+type FeatureSetDefaults_FeatureSetEditionDefault struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
+ *x = FeatureSetDefaults_FeatureSetEditionDefault{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead.
+func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0}
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition {
+ if x != nil && x.Edition != nil {
+ return *x.Edition
+ }
+ return Edition_EDITION_UNKNOWN
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
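// Editorial sketch, continuing the hypothetical featuresketch example above:
// the comment on FeatureSetEditionDefault requires the defaults list to be in
// strict ascending order by edition, which is what makes the closest-match
// lookup in resolveFeatureDefaults valid. A small check of that invariant,
// using only the getters added in this diff:
func defaultsStrictlyAscending(d *descriptorpb.FeatureSetDefaults) bool {
	entries := d.GetDefaults()
	for i := 1; i < len(entries); i++ {
		if entries[i].GetEdition() <= entries[i-1].GetEdition() {
			return false
		}
	}
	return true
}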
+
type SourceCodeInfo_Location struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -3388,7 +4196,7 @@ type SourceCodeInfo_Location struct {
func (x *SourceCodeInfo_Location) Reset() {
*x = SourceCodeInfo_Location{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3401,7 +4209,7 @@ func (x *SourceCodeInfo_Location) String() string {
func (*SourceCodeInfo_Location) ProtoMessage() {}
func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3414,7 +4222,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead.
func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0}
}
func (x *SourceCodeInfo_Location) GetPath() []int32 {
@@ -3475,7 +4283,7 @@ type GeneratedCodeInfo_Annotation struct {
func (x *GeneratedCodeInfo_Annotation) Reset() {
*x = GeneratedCodeInfo_Annotation{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3488,7 +4296,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3501,7 +4309,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead.
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0}
}
func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 {
@@ -3550,7 +4358,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
- 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
@@ -3588,527 +4396,687 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66,
- 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65,
- 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a,
- 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65,
- 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54,
- 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
- 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
+ 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06,
+ 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65,
+ 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45,
- 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65,
- 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a,
- 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44,
- 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55,
- 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
- 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a,
+ 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e,
+ 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a,
+ 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
+ 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61,
- 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
- 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
- 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a,
- 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22,
- 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e,
- 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01,
- 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68,
- 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72,
- 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a,
- 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63,
- 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
- 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
- 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
- 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65,
- 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34,
- 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
- 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49,
- 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49,
- 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22,
- 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06,
- 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75,
- 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c,
- 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70,
- 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65,
- 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65,
- 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69,
- 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f,
- 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e,
- 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18,
- 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f,
- 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12,
- 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12,
- 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12,
- 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04,
- 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05,
- 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34,
- 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44,
- 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f,
- 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49,
- 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f,
- 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53,
- 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42,
- 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
- 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
- 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b,
- 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a,
- 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43,
- 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c,
- 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c,
- 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12,
- 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45,
- 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75,
- 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a,
- 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18,
- 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
- 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72,
- 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d,
- 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d,
- 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
+ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
+ 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e,
+ 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
+ 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03,
- 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83,
- 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
- 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40,
+ 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+ 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+ 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
+ 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63,
+ 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76,
+ 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b,
+ 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e,
+ 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d,
+ 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65,
+ 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04,
+ 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41,
+ 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45,
+ 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
+ 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74,
+ 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65,
+ 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a,
+ 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c,
+ 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41,
+ 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36,
+ 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54,
+ 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54,
+ 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58,
+ 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
+ 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a,
+ 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10,
+ 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c,
+ 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
+ 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45,
+ 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51,
+ 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a,
+ 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
+ 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61,
+ 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65,
+ 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74,
- 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89,
- 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
- 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f,
- 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e,
+ 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
+ 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52,
+ 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca,
+ 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21,
+ 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67,
+ 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f,
+ 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c,
+ 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61,
+ 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61,
+ 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28,
+ 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68,
+ 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c,
+ 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53,
+ 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f,
+ 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
+ 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a,
+ 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12,
+ 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f,
+ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20,
+ 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61,
+ 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a,
+ 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50,
+ 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
+ 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65,
+ 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+ 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70,
+ 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a,
+ 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79,
+ 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
- 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46,
- 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61,
- 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a,
- 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73,
- 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76,
- 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65,
- 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
- 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c,
- 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61,
- 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18,
- 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45,
- 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16,
- 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63,
- 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
- 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43,
- 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69,
- 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74,
- 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44,
- 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a,
- 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13,
- 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35,
- 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20,
- 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25,
- 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
- 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62,
- 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a,
- 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41,
- 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c,
- 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69,
- 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68,
- 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
- 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
- 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65,
- 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c,
- 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70,
- 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34,
- 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
- 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63,
- 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79,
- 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64,
- 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
- 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c,
- 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08,
- 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb,
- 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74,
- 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
- 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
- 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a,
- 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
- 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72,
- 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
- 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28,
- 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
- 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07,
- 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a,
- 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a,
- 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70,
- 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65,
- 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09,
- 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70,
- 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a,
- 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f,
- 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18,
- 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e,
- 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a,
- 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28,
- 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28,
- 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62,
- 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65,
- 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65,
- 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18,
- 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
- 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
- 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75,
- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a,
- 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f,
- 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50,
- 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65,
- 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12,
- 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d,
- 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a,
- 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e,
- 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14,
- 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52,
- 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
- 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
- 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47,
- 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f,
- 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
- 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
- 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52,
- 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05,
- 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
- 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59,
- 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54,
- 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
- 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
- 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+ 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
- 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e,
- 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c,
- 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
- 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a,
+ 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09,
+ 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44,
+ 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45,
+ 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
+ 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c,
+ 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69,
+ 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53,
+ 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f,
+ 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f,
+ 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
- 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
- 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
- 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
- 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
- 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
+ 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56,
+ 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67,
+ 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02,
+ 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65,
+ 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e,
+ 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80,
- 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
- 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
- 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a,
- 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76,
- 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74,
- 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50,
- 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10,
- 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c,
- 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64,
- 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17,
- 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49,
- 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a,
- 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8,
- 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e,
+ 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06,
+ 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09,
+ 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52,
+ 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47,
+ 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+ 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52,
+ 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61,
+ 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
+ 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+ 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61,
+ 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61,
+ 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04,
+ 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65,
+ 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
+ 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b,
+ 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61,
+ 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37,
+ 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07,
+ 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a,
+ 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47,
+ 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c,
+ 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35,
+ 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
+ 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54,
+ 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d,
+ 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45,
+ 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
+ 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e,
+ 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a,
+ 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41,
+ 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01,
+ 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10,
+ 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41,
+ 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10,
+ 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47,
+ 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a,
+ 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
+ 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41,
+ 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43,
+ 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07,
+ 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+ 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+ 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14,
+ 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
+ 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69,
+ 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70,
+ 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69,
+ 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a,
+ 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74,
+ 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
+ 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
+ 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
+ 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
+ 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
+ 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
+ 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+ 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+ 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
+ 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08,
+ 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+ 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14,
+ 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
+ 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a,
+ 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64,
+ 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18,
+ 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63,
+ 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
+ 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65,
+ 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a,
+ 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
+ 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+ 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c,
+ 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
+ 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a,
+ 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53,
+ 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54,
+ 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03,
+ 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+ 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61,
+ 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f,
+ 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e,
+ 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e,
+ 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61,
+ 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e,
+ 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a,
+ 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61,
+ 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e,
+ 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69,
+ 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88,
+ 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
+ 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50,
+ 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
+ 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50,
+ 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06,
+ 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50,
+ 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52,
+ 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2,
+ 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2,
+ 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, 0x15, 0x72,
+ 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01,
+ 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07,
+ 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e,
+ 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78,
+ 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69,
+ 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
+ 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01,
+ 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46,
+ 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e,
+ 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
- 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64,
- 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a,
- 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74,
- 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e,
- 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76,
- 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
- 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52,
- 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
- 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
- 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67,
- 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65,
- 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72,
- 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72,
- 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
- 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e,
- 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43,
- 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01,
- 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61,
- 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61,
- 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05,
- 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65,
- 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d,
- 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e,
- 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
- 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65,
- 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18,
- 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65,
- 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0,
- 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65,
- 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e,
- 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05,
- 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62,
- 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69,
- 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03,
- 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
- 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73,
- 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e,
- 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a,
- 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10,
- 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02,
- 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e,
+ 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06,
+ 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42,
+ 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a,
+ 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50,
+ 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44,
+ 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
+ 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+ 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12,
+ 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52,
+ 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10,
+ 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a,
+ 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e,
+ 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54,
+ 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e,
+ 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50,
+ 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e,
+ 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f,
+ 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
+ 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a,
+ 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e,
+ 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c,
+ 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22,
+ 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a,
+ 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10,
+ 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54,
+ 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9,
+ 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
+ 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73,
+ 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+ 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69,
+ 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d,
+ 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a,
+ 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a,
+ 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
+ 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a,
+ 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02,
+ 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e,
+ 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64,
+ 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74,
+ 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
+ 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64,
+ 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61,
+ 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d,
+ 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e,
+ 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d,
+ 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e,
+ 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e,
+ 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a,
+ 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e,
+ 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05,
+ 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55,
+ 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a,
+ 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10,
+ 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30,
+ 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
+ 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17,
+ 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54,
+ 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49,
+ 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e,
+ 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
+ 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
+ 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
+ 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a,
+ 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
+ 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01,
+ 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e,
}
var (
@@ -4123,103 +5091,136 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
return file_google_protobuf_descriptor_proto_rawDescData
}
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32)
var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
- (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState
- (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type
- (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label
- (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode
- (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType
- (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType
- (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention
- (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType
- (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel
- (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet
- (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto
- (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto
- (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions
- (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto
- (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto
- (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto
- (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto
- (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto
- (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto
- (*FileOptions)(nil), // 20: google.protobuf.FileOptions
- (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions
- (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions
- (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions
- (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions
- (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions
- (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions
- (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions
- (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption
- (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo
- (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo
- (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange
- (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange
- (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration
- (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange
- (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart
- (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location
- (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation
+ (Edition)(0), // 0: google.protobuf.Edition
+ (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState
+ (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type
+ (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label
+ (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode
+ (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType
+ (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType
+ (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention
+ (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType
+ (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel
+ (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence
+ (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType
+ (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
+ (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation
+ (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding
+ (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat
+ (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet
+ (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto
+ (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto
+ (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions
+ (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto
+ (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto
+ (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto
+ (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto
+ (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto
+ (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto
+ (*FileOptions)(nil), // 27: google.protobuf.FileOptions
+ (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions
+ (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions
+ (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions
+ (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions
+ (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions
+ (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions
+ (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions
+ (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption
+ (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet
+ (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults
+ (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo
+ (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo
+ (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange
+ (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange
+ (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration
+ (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
+ (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault
+ (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart
+ (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation
}
var file_google_protobuf_descriptor_proto_depIdxs = []int32{
- 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
- 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
- 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
- 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
- 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
- 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
- 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
- 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
- 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
- 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
- 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
- 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
- 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
- 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
- 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
- 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
- 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
- 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
- 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
- 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
- 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
- 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
- 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
- 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
- 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
- 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
- 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
- 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
- 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType
- 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
- 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
- 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
- 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
- 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
- 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
- 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- 49, // [49:49] is the sub-list for method output_type
- 49, // [49:49] is the sub-list for method input_type
- 49, // [49:49] is the sub-list for extension type_name
- 49, // [49:49] is the sub-list for extension extendee
- 0, // [0:49] is the sub-list for field type_name
+ 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+ 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+ 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+ 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+ 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+ 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
+ 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+ 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+ 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+ 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+ 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+ 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+ 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+ 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+ 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+ 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+ 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+ 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+ 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+ 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+ 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+ 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+ 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+ 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+ 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+ 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+ 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+ 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+ 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+ 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+ 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+ 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+ 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+ 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+ 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+ 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+ 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+ 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+ 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+ 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+ 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+ 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+ 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+ 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+ 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+ 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+ 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet
+ 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ 71, // [71:71] is the sub-list for method output_type
+ 71, // [71:71] is the sub-list for method input_type
+ 71, // [71:71] is the sub-list for extension type_name
+ 71, // [71:71] is the sub-list for extension extendee
+ 0, // [0:71] is the sub-list for field type_name
}
func init() { file_google_protobuf_descriptor_proto_init() }
@@ -4475,19 +5476,21 @@ func file_google_protobuf_descriptor_proto_init() {
}
}
file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SourceCodeInfo); i {
+ switch v := v.(*FeatureSet); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
+ case 3:
+ return &v.extensionFields
default:
return nil
}
}
file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GeneratedCodeInfo); i {
+ switch v := v.(*FeatureSetDefaults); i {
case 0:
return &v.state
case 1:
@@ -4499,7 +5502,7 @@ func file_google_protobuf_descriptor_proto_init() {
}
}
file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DescriptorProto_ExtensionRange); i {
+ switch v := v.(*SourceCodeInfo); i {
case 0:
return &v.state
case 1:
@@ -4511,7 +5514,7 @@ func file_google_protobuf_descriptor_proto_init() {
}
}
file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DescriptorProto_ReservedRange); i {
+ switch v := v.(*GeneratedCodeInfo); i {
case 0:
return &v.state
case 1:
@@ -4523,7 +5526,7 @@ func file_google_protobuf_descriptor_proto_init() {
}
}
file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExtensionRangeOptions_Declaration); i {
+ switch v := v.(*DescriptorProto_ExtensionRange); i {
case 0:
return &v.state
case 1:
@@ -4535,7 +5538,7 @@ func file_google_protobuf_descriptor_proto_init() {
}
}
file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
+ switch v := v.(*DescriptorProto_ReservedRange); i {
case 0:
return &v.state
case 1:
@@ -4547,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() {
}
}
file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UninterpretedOption_NamePart); i {
+ switch v := v.(*ExtensionRangeOptions_Declaration); i {
case 0:
return &v.state
case 1:
@@ -4559,7 +5562,7 @@ func file_google_protobuf_descriptor_proto_init() {
}
}
file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SourceCodeInfo_Location); i {
+ switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
case 0:
return &v.state
case 1:
@@ -4571,6 +5574,54 @@ func file_google_protobuf_descriptor_proto_init() {
}
}
file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldOptions_EditionDefault); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UninterpretedOption_NamePart); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceCodeInfo_Location); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GeneratedCodeInfo_Annotation); i {
case 0:
return &v.state
@@ -4588,8 +5639,8 @@ func file_google_protobuf_descriptor_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
- NumEnums: 10,
- NumMessages: 28,
+ NumEnums: 17,
+ NumMessages: 32,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
index f77ef0de153..54ae1ab87d8 100644
--- a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
+++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
@@ -49,12 +49,13 @@ type extensionType struct {
// A Message is a dynamically constructed protocol buffer message.
//
-// Message implements the proto.Message interface, and may be used with all
-// standard proto package functions such as Marshal, Unmarshal, and so forth.
+// Message implements the [google.golang.org/protobuf/proto.Message] interface,
+// and may be used with all standard proto package functions
+// such as Marshal, Unmarshal, and so forth.
//
-// Message also implements the protoreflect.Message interface. See the protoreflect
-// package documentation for that interface for how to get and set fields and
-// otherwise interact with the contents of a Message.
+// Message also implements the [protoreflect.Message] interface.
+// See the [protoreflect] package documentation for that interface for how to
+// get and set fields and otherwise interact with the contents of a Message.
//
// Reflection API functions which construct messages, such as NewField,
// return new dynamic messages of the appropriate type. Functions which take
@@ -87,7 +88,7 @@ func NewMessage(desc protoreflect.MessageDescriptor) *Message {
// ProtoMessage implements the legacy message interface.
func (m *Message) ProtoMessage() {}
-// ProtoReflect implements the protoreflect.ProtoMessage interface.
+// ProtoReflect implements the [protoreflect.ProtoMessage] interface.
func (m *Message) ProtoReflect() protoreflect.Message {
return m
}
@@ -115,25 +116,25 @@ func (m *Message) Type() protoreflect.MessageType {
}
// New returns a newly allocated empty message with the same descriptor.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) New() protoreflect.Message {
return m.Type().New()
}
// Interface returns the message.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) Interface() protoreflect.ProtoMessage {
return m
}
-// ProtoMethods is an internal detail of the protoreflect.Message interface.
+// ProtoMethods is an internal detail of the [protoreflect.Message] interface.
// Users should never call this directly.
func (m *Message) ProtoMethods() *protoiface.Methods {
return nil
}
// Range visits every populated field in undefined order.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
for num, v := range m.known {
fd := m.ext[num]
@@ -150,7 +151,7 @@ func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value)
}
// Has reports whether a field is populated.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) Has(fd protoreflect.FieldDescriptor) bool {
m.checkField(fd)
if fd.IsExtension() && m.ext[fd.Number()] != fd {
@@ -164,7 +165,7 @@ func (m *Message) Has(fd protoreflect.FieldDescriptor) bool {
}
// Clear clears a field.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) Clear(fd protoreflect.FieldDescriptor) {
m.checkField(fd)
num := fd.Number()
@@ -173,7 +174,7 @@ func (m *Message) Clear(fd protoreflect.FieldDescriptor) {
}
// Get returns the value of a field.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
m.checkField(fd)
num := fd.Number()
@@ -212,7 +213,7 @@ func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
}
// Mutable returns a mutable reference to a repeated, map, or message field.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
m.checkField(fd)
if !fd.IsMap() && !fd.IsList() && fd.Message() == nil {
@@ -241,7 +242,7 @@ func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
}
// Set stores a value in a field.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
m.checkField(fd)
if m.known == nil {
@@ -284,7 +285,7 @@ func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) {
}
// NewField returns a new value for assignable to the field of a given descriptor.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
m.checkField(fd)
switch {
@@ -305,7 +306,7 @@ func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
}
// WhichOneof reports which field in a oneof is populated, returning nil if none are populated.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
for i := 0; i < od.Fields().Len(); i++ {
fd := od.Fields().Get(i)
@@ -317,13 +318,13 @@ func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.Field
}
// GetUnknown returns the raw unknown fields.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) GetUnknown() protoreflect.RawFields {
return m.unknown
}
// SetUnknown sets the raw unknown fields.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) SetUnknown(r protoreflect.RawFields) {
if m.known == nil {
panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName()))
@@ -332,7 +333,7 @@ func (m *Message) SetUnknown(r protoreflect.RawFields) {
}
// IsValid reports whether the message is valid.
-// See protoreflect.Message for details.
+// See [protoreflect.Message] for details.
func (m *Message) IsValid() bool {
return m.known != nil
}
diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go
index 5a8010f18fa..c432817bb9c 100644
--- a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go
+++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go
@@ -23,13 +23,20 @@ type extField struct {
// A Types is a collection of dynamically constructed descriptors.
// Its methods are safe for concurrent use.
//
-// Types implements protoregistry.MessageTypeResolver and protoregistry.ExtensionTypeResolver.
-// A Types may be used as a proto.UnmarshalOptions.Resolver.
+// Types implements [protoregistry.MessageTypeResolver] and [protoregistry.ExtensionTypeResolver].
+// A Types may be used as a [google.golang.org/protobuf/proto.UnmarshalOptions.Resolver].
type Types struct {
+ // atomicExtFiles is used with sync/atomic and hence must be the first word
+ // of the struct to guarantee 64-bit alignment.
+ //
+ // TODO(stapelberg): once we only support Go 1.19 and newer, switch this
+ // field to be of type atomic.Uint64 to guarantee alignment on
+ // stack-allocated values, too.
+ atomicExtFiles uint64
+ extMu sync.Mutex
+
files *protoregistry.Files
- extMu sync.Mutex
- atomicExtFiles uint64
extensionsByMessage map[extField]protoreflect.ExtensionDescriptor
}
@@ -45,7 +52,7 @@ func NewTypes(f *protoregistry.Files) *Types {
// FindEnumByName looks up an enum by its full name;
// e.g., "google.protobuf.Field.Kind".
//
-// This returns (nil, protoregistry.NotFound) if not found.
+// This returns (nil, [protoregistry.NotFound]) if not found.
func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) {
d, err := t.files.FindDescriptorByName(name)
if err != nil {
@@ -63,7 +70,7 @@ func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumTyp
// where the extension is declared and is unrelated to the full name of the
// message being extended.
//
-// This returns (nil, protoregistry.NotFound) if not found.
+// This returns (nil, [protoregistry.NotFound]) if not found.
func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) {
d, err := t.files.FindDescriptorByName(name)
if err != nil {
@@ -79,7 +86,7 @@ func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.Ex
// FindExtensionByNumber looks up an extension field by the field number
// within some parent message, identified by full name.
//
-// This returns (nil, protoregistry.NotFound) if not found.
+// This returns (nil, [protoregistry.NotFound]) if not found.
func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
// Construct the extension number map lazily, since not every user will need it.
// Update the map if new files are added to the registry.
@@ -96,7 +103,7 @@ func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field proto
// FindMessageByName looks up a message by its full name;
// e.g. "google.protobuf.Any".
//
-// This returns (nil, protoregistry.NotFound) if not found.
+// This returns (nil, [protoregistry.NotFound]) if not found.
func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) {
d, err := t.files.FindDescriptorByName(name)
if err != nil {
@@ -112,7 +119,7 @@ func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.Mess
// FindMessageByURL looks up a message by a URL identifier.
// See documentation on google.protobuf.Any.type_url for the URL format.
//
-// This returns (nil, protoregistry.NotFound) if not found.
+// This returns (nil, [protoregistry.NotFound]) if not found.
func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
// This function is similar to FindMessageByName but
// truncates anything before and including '/' in the URL.
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 580b232f477..9de51be5403 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -237,7 +237,8 @@ type Any struct {
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com.
+ // type.googleapis.com. As of May 2023, there are no widely used type server
+ // implementations and no plans to implement one.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc
new file mode 100644
index 00000000000..730e569b069
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc
@@ -0,0 +1 @@
+'|Ê&{tÄU|gGê(ìCy=+¨œòcû:u:/pœ#~žü["±4¤!nÙAªDK<ŠufÿhÅa¿Â:ºü¸¡´B/£Ø¤¹¤ò_hÎÛSãT*wÌx¼¯¹-ç|àÀÓƒÑÄäóÌ㣗A$$â6£ÁâG)8nÏpûÆË¡3ÌšœoïÏvŽB–3¿]xÝ“Ó2l§G•|qRÞ¯
ö2
5R–Ó×Ç$´ñ½Yè¡ÞÝ™l‘Ë«yAI"ÛŒ˜®íû¹¼kÄ|Kåþ[9ÆâÒå=°úÿŸñ|@S•3ó#æx?¾V„,¾‚SÆÝõœwPíogÒ6&V6 ©D.dBŠ7
\ No newline at end of file
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore b/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore
new file mode 100644
index 00000000000..95a851586a5
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/.gitignore
@@ -0,0 +1,8 @@
+*~
+.*.swp
+*.out
+*.test
+*.pem
+*.cov
+jose-util/jose-util
+jose-util.t.err
\ No newline at end of file
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml b/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml
new file mode 100644
index 00000000000..391b99a4014
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/.travis.yml
@@ -0,0 +1,45 @@
+language: go
+
+sudo: false
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+go:
+- '1.14.x'
+- '1.15.x'
+- tip
+
+go_import_path: gopkg.in/square/go-jose.v2
+
+before_script:
+- export PATH=$HOME/.local/bin:$PATH
+
+before_install:
+# Install encrypted gitcookies to get around bandwidth-limits
+# that is causing Travis-CI builds to fail. For more info, see
+# https://github.com/golang/go/issues/12933
+- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true
+- bash .gitcookies.sh || true
+- go get github.com/wadey/gocovmerge
+- go get github.com/mattn/goveralls
+- go get github.com/stretchr/testify/assert
+- go get github.com/stretchr/testify/require
+- go get github.com/google/go-cmp/cmp
+- go get golang.org/x/tools/cmd/cover || true
+- go get code.google.com/p/go.tools/cmd/cover || true
+- pip install cram --user
+
+script:
+- go test . -v -covermode=count -coverprofile=profile.cov
+- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov
+- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov
+- go test ./json -v # no coverage for forked encoding/json package
+- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
+- cd ..
+
+after_success:
+- gocovmerge *.cov */*.cov > merged.coverprofile
+- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md
new file mode 100644
index 00000000000..61b183651c0
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+# Contributing
+
+If you would like to contribute code to go-jose you can do so through GitHub by
+forking the repository and sending a pull request.
+
+When submitting code, please make every effort to follow existing conventions
+and style in order to keep the code as readable as possible. Please also make
+sure all tests pass by running `go test`, and format your code with `go fmt`.
+We also recommend using `golint` and `errcheck`.
+
+Before your code can be accepted into the project you must also sign the
+[Individual Contributor License Agreement][1].
+
+ [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE b/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/README.md
new file mode 100644
index 00000000000..46b02d61d8a
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/README.md
@@ -0,0 +1,118 @@
+# Go JOSE
+
+[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v1)
+[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
+[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
+[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=v2)](https://travis-ci.org/go-jose/go-jose)
+[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/go-jose/go-jose)
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. This includes support for JSON Web Encryption,
+JSON Web Signature, and JSON Web Token standards.
+
+**Disclaimer**: This library contains encryption software that is subject to
+the U.S. Export Administration Regulations. You may not export, re-export,
+transfer or download this code or any part of it in violation of any United
+States law, directive or regulation. In particular this software may not be
+exported or re-exported in any form or on any media to Iran, North Sudan,
+Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
+US maintained blocked list.
+
+## Overview
+
+The implementation follows the
+[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+Tables of supported algorithms are shown below. The library supports both
+the compact and full serialization formats, and has optional support for
+multiple recipients. It also comes with a small command-line utility
+([`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util))
+for dealing with JOSE messages in a shell.
+
+**Note**: We use a forked version of the `encoding/json` package from the Go
+standard library which uses case-sensitive matching for member names (instead
+of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
+This is to avoid differences in interpretation of messages between go-jose and
+libraries in other languages.
+
+### Versions
+
+We use [gopkg.in](https://gopkg.in) for versioning.
+
+[Version 2](https://gopkg.in/go-jose/go-jose.v2)
+([branch](https://github.com/go-jose/go-jose/tree/v2),
+[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current version:
+
+ import "gopkg.in/go-jose/go-jose.v2"
+
+The old `v1` branch ([go-jose.v1](https://gopkg.in/go-jose/go-jose.v1)) will
+still receive backported bug fixes and security fixes, but otherwise
+development is frozen. All new feature development takes place on the `v2`
+branch. Version 2 also contains additional sub-packages such as the
+[jwt](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) implementation
+contributed by [@shaxbee](https://github.com/shaxbee).
+
+### Supported algorithms
+
+See below for a table of supported algorithms. Algorithm identifiers match
+the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+standard where possible. The Godoc reference has a list of constants.
+
+ Key encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5 | RSA1_5
+ RSA-OAEP | RSA-OAEP, RSA-OAEP-256
+ AES key wrap | A128KW, A192KW, A256KW
+ AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct) | ECDH-ES1
+ Direct encryption | dir1
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
+ RSASSA-PSS | PS256, PS384, PS512
+ HMAC | HS256, HS384, HS512
+ ECDSA | ES256, ES384, ES512
+ Ed25519 | EdDSA2
+
+2. Only available in version 2 of the package
+
+ Content encryption | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM | A128GCM, A192GCM, A256GCM
+
+ Compression               | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951) | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
+allows attaching a key id.
+
+ Algorithm(s) | Corresponding types
+ :------------------------- | -------------------------------
+ RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
+ ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
+ EdDSA1 | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey)
+ AES, HMAC | []byte
+
+1. Only available in version 2 of the package
+
+## Examples
+
+[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v1)
+[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
+
+Examples can be found in the Godoc
+reference for this package. The
+[`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util)
+subdirectory also contains a small command-line utility which might be useful
+as an example.
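
For quick reference alongside this vendored README, a minimal sign-and-verify round trip might look like the sketch below. It assumes the v2 signing API (`jose.NewSigner`, `jose.SigningKey`, `jose.ParseSigned`) and generates a throwaway RSA key purely for illustration; it is not part of the vendored sources.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/go-jose/go-jose.v2"
)

func main() {
	// Throwaway RSA key for the example only.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Create an RS256 signer from the private key.
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	// Sign a payload and produce the compact serialization.
	object, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
	if err != nil {
		panic(err)
	}
	serialized, err := object.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// Parse and verify with the corresponding public key.
	parsed, err := jose.ParseSigned(serialized)
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```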
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go
new file mode 100644
index 00000000000..3ca79cc2682
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go
@@ -0,0 +1,592 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "math/big"
+
+ "golang.org/x/crypto/ed25519"
+ josecipher "gopkg.in/go-jose/go-jose.v2/cipher"
+ "gopkg.in/go-jose/go-jose.v2/json"
+)
+
+// A generic RSA-based encrypter/verifier
+type rsaEncrypterVerifier struct {
+ publicKey *rsa.PublicKey
+}
+
+// A generic RSA-based decrypter/signer
+type rsaDecrypterSigner struct {
+ privateKey *rsa.PrivateKey
+}
+
+// A generic EC-based encrypter/verifier
+type ecEncrypterVerifier struct {
+ publicKey *ecdsa.PublicKey
+}
+
+type edEncrypterVerifier struct {
+ publicKey ed25519.PublicKey
+}
+
+// A key generator for ECDH-ES
+type ecKeyGenerator struct {
+ size int
+ algID string
+ publicKey *ecdsa.PublicKey
+}
+
+// A generic EC-based decrypter/signer
+type ecDecrypterSigner struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+type edDecrypterSigner struct {
+ privateKey ed25519.PrivateKey
+}
+
+// newRSARecipient creates recipientKeyInfo based on the given key.
+func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &rsaEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newRSASigner creates a recipientSigInfo based on the given key.
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
+	// Verify that the signature algorithm is supported by this signer
+ switch sigAlg {
+ case RS256, RS384, RS512, PS256, PS384, PS512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &rsaDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
+ if sigAlg != EdDSA {
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &edDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// newECDHRecipient creates recipientKeyInfo based on the given key.
+func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
+ // Verify that key management algorithm is supported by this encrypter
+ switch keyAlg {
+ case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientKeyInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return recipientKeyInfo{}, errors.New("invalid public key")
+ }
+
+ return recipientKeyInfo{
+ keyAlg: keyAlg,
+ keyEncrypter: &ecEncrypterVerifier{
+ publicKey: publicKey,
+ },
+ }, nil
+}
+
+// newECDSASigner creates a recipientSigInfo based on the given key.
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
+	// Verify that the signature algorithm is supported by this signer
+ switch sigAlg {
+ case ES256, ES384, ES512:
+ default:
+ return recipientSigInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ if privateKey == nil {
+ return recipientSigInfo{}, errors.New("invalid private key")
+ }
+
+ return recipientSigInfo{
+ sigAlg: sigAlg,
+ publicKey: staticPublicKey(&JSONWebKey{
+ Key: privateKey.Public(),
+ }),
+ signer: &ecDecrypterSigner{
+ privateKey: privateKey,
+ },
+ }, nil
+}
+
+// Encrypt the given payload and update the object.
+func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ encryptedKey, err := ctx.encrypt(cek, alg)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: encryptedKey,
+ header: &rawHeader{},
+ }, nil
+}
+
+// Encrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
+ switch alg {
+ case RSA1_5:
+ return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
+ case RSA_OAEP:
+ return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
+ case RSA_OAEP_256:
+ return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
+}
+
+// Decrypt the given payload. Based on the key encryption algorithm,
+// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
+ // Note: The random reader on decrypt operations is only used for blinding,
+	// so stubbing is meaningless (hence the direct use of rand.Reader).
+ switch alg {
+ case RSA1_5:
+ defer func() {
+ // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
+ // because of an index out of bounds error, which we want to ignore.
+			// This has been fixed in Go 1.3.1 (released 2014/08/13); the recover()
+			// exists only to prevent crashes with unpatched versions.
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
+ // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
+ _ = recover()
+ }()
+
+ // Perform some input validation.
+ keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
+ if keyBytes != len(jek) {
+ // Input size is incorrect, the encrypted payload should always match
+ // the size of the public modulus (e.g. using a 2048 bit key will
+ // produce 256 bytes of output). Reject this since it's invalid input.
+ return nil, ErrCryptoFailure
+ }
+
+ cek, _, err := generator.genKey()
+ if err != nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
+ // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
+ // the Million Message Attack on Cryptographic Message Syntax". We are
+ // therefore deliberately ignoring errors here.
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
+
+ return cek, nil
+ case RSA_OAEP:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ case RSA_OAEP_256:
+ // Use rand.Reader for RSA blinding
+ return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
+ }
+
+ return nil, ErrUnsupportedAlgorithm
+}
+
+// Sign the given payload
+func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ var out []byte
+ var err error
+
+ switch alg {
+ case RS256, RS384, RS512:
+ out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
+ case PS256, PS384, PS512:
+ out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ })
+ }
+
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var hash crypto.Hash
+
+ switch alg {
+ case RS256, PS256:
+ hash = crypto.SHA256
+ case RS384, PS384:
+ hash = crypto.SHA384
+ case RS512, PS512:
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ switch alg {
+ case RS256, RS384, RS512:
+ return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
+ case PS256, PS384, PS512:
+ return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
+ }
+
+ return ErrUnsupportedAlgorithm
+}
+
+// Encrypt the given payload and update the object.
+func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
+ switch alg {
+ case ECDH_ES:
+		// ECDH-ES mode doesn't wrap a key; the shared secret is used directly as the key.
+ return recipientInfo{
+ header: &rawHeader{},
+ }, nil
+ case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+ default:
+ return recipientInfo{}, ErrUnsupportedAlgorithm
+ }
+
+ generator := ecKeyGenerator{
+ algID: string(alg),
+ publicKey: ctx.publicKey,
+ }
+
+ switch alg {
+ case ECDH_ES_A128KW:
+ generator.size = 16
+ case ECDH_ES_A192KW:
+ generator.size = 24
+ case ECDH_ES_A256KW:
+ generator.size = 32
+ }
+
+ kek, header, err := generator.genKey()
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ block, err := aes.NewCipher(kek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ jek, err := josecipher.KeyWrap(block, cek)
+ if err != nil {
+ return recipientInfo{}, err
+ }
+
+ return recipientInfo{
+ encryptedKey: jek,
+ header: &header,
+ }, nil
+}
+
+// Get key size for EC key generator
+func (ctx ecKeyGenerator) keySize() int {
+ return ctx.size
+}
+
+// Get a content encryption key for ECDH-ES
+func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
+ priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
+ if err != nil {
+ return nil, rawHeader{}, err
+ }
+
+ out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
+
+ b, err := json.Marshal(&JSONWebKey{
+ Key: &priv.PublicKey,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ headers := rawHeader{
+ headerEPK: makeRawMessage(b),
+ }
+
+ return out, headers, nil
+}
+
+// Decrypt the given payload and return the content encryption key.
+func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
+ epk, err := headers.getEPK()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid epk header")
+ }
+ if epk == nil {
+ return nil, errors.New("go-jose/go-jose: missing epk header")
+ }
+
+ publicKey, ok := epk.Key.(*ecdsa.PublicKey)
+ if publicKey == nil || !ok {
+ return nil, errors.New("go-jose/go-jose: invalid epk header")
+ }
+
+ if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
+ return nil, errors.New("go-jose/go-jose: invalid public key in epk header")
+ }
+
+ apuData, err := headers.getAPU()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid apu header")
+ }
+ apvData, err := headers.getAPV()
+ if err != nil {
+ return nil, errors.New("go-jose/go-jose: invalid apv header")
+ }
+
+ deriveKey := func(algID string, size int) []byte {
+ return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
+ }
+
+ var keySize int
+
+ algorithm := headers.getAlgorithm()
+ switch algorithm {
+ case ECDH_ES:
+ // ECDH-ES uses direct key agreement, no key unwrapping necessary.
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
+ case ECDH_ES_A128KW:
+ keySize = 16
+ case ECDH_ES_A192KW:
+ keySize = 24
+ case ECDH_ES_A256KW:
+ keySize = 32
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ key := deriveKey(string(algorithm), keySize)
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return josecipher.KeyUnwrap(block, recipient.encryptedKey)
+}
+
+func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ if alg != EdDSA {
+ return Signature{}, ErrUnsupportedAlgorithm
+ }
+
+ sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
+ if err != nil {
+ return Signature{}, err
+ }
+
+ return Signature{
+ Signature: sig,
+ protected: &rawHeader{},
+ }, nil
+}
+
+func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ if alg != EdDSA {
+ return ErrUnsupportedAlgorithm
+ }
+ ok := ed25519.Verify(ctx.publicKey, payload, signature)
+ if !ok {
+ return errors.New("go-jose/go-jose: ed25519 signature failed to verify")
+ }
+ return nil
+}
+
+// Sign the given payload
+func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
+ var expectedBitSize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ expectedBitSize = 256
+ hash = crypto.SHA256
+ case ES384:
+ expectedBitSize = 384
+ hash = crypto.SHA384
+ case ES512:
+ expectedBitSize = 521
+ hash = crypto.SHA512
+ }
+
+ curveBits := ctx.privateKey.Curve.Params().BitSize
+ if expectedBitSize != curveBits {
+ return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
+ if err != nil {
+ return Signature{}, err
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays and pad
+ // them with zeros on the left to make sure the sizes work out. Both arrays
+ // must be keyBytes long, and the output must be 2*keyBytes long.
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+
+ return Signature{
+ Signature: out,
+ protected: &rawHeader{},
+ }, nil
+}
+
+// Verify the given payload
+func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
+ var keySize int
+ var hash crypto.Hash
+
+ switch alg {
+ case ES256:
+ keySize = 32
+ hash = crypto.SHA256
+ case ES384:
+ keySize = 48
+ hash = crypto.SHA384
+ case ES512:
+ keySize = 66
+ hash = crypto.SHA512
+ default:
+ return ErrUnsupportedAlgorithm
+ }
+
+ if len(signature) != 2*keySize {
+ return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
+ }
+
+ hasher := hash.New()
+
+ // According to documentation, Write() on hash never fails
+ _, _ = hasher.Write(payload)
+ hashed := hasher.Sum(nil)
+
+ r := big.NewInt(0).SetBytes(signature[:keySize])
+ s := big.NewInt(0).SetBytes(signature[keySize:])
+
+ match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
+ if !match {
+ return errors.New("go-jose/go-jose: ecdsa signature failed to verify")
+ }
+
+ return nil
+}
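
The ECDSA signer above serializes r and s as two zero-padded, fixed-width big-endian halves, as JWS requires. Below is a standalone sketch of that encoding; the `joseEncode` helper is illustrative and not part of the package.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)

// joseEncode serializes r and s as two zero-padded big-endian halves.
func joseEncode(r, s *big.Int, curveBits int) []byte {
	keyBytes := curveBits / 8
	if curveBits%8 > 0 {
		keyBytes++
	}
	out := make([]byte, 2*keyBytes)
	r.FillBytes(out[:keyBytes])
	s.FillBytes(out[keyBytes:])
	return out
}

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	digest := sha256.Sum256([]byte("payload"))
	r, s, _ := ecdsa.Sign(rand.Reader, key, digest[:])

	sig := joseEncode(r, s, key.Curve.Params().BitSize)
	fmt.Printf("ES256 signature is %d bytes\n", len(sig)) // always 64 for P-256
}
```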
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go
new file mode 100644
index 00000000000..87065a5b96e
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go
@@ -0,0 +1,196 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ nonceBytes = 16
+)
+
+// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
+func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
+ keySize := len(key) / 2
+ integrityKey := key[:keySize]
+ encryptionKey := key[keySize:]
+
+ blockCipher, err := newBlockCipher(encryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var hash func() hash.Hash
+ switch keySize {
+ case 16:
+ hash = sha256.New
+ case 24:
+ hash = sha512.New384
+ case 32:
+ hash = sha512.New
+ }
+
+ return &cbcAEAD{
+ hash: hash,
+ blockCipher: blockCipher,
+ authtagBytes: keySize,
+ integrityKey: integrityKey,
+ }, nil
+}
+
+// An AEAD based on CBC+HMAC
+type cbcAEAD struct {
+ hash func() hash.Hash
+ authtagBytes int
+ integrityKey []byte
+ blockCipher cipher.Block
+}
+
+func (ctx *cbcAEAD) NonceSize() int {
+ return nonceBytes
+}
+
+func (ctx *cbcAEAD) Overhead() int {
+ // Maximum overhead is block size (for padding) plus auth tag length, where
+ // the length of the auth tag is equivalent to the key size.
+ return ctx.blockCipher.BlockSize() + ctx.authtagBytes
+}
+
+// Seal encrypts and authenticates the plaintext.
+func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
+ // Output buffer -- must take care not to mangle plaintext input.
+ ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
+ copy(ciphertext, plaintext)
+ ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
+
+ cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
+
+ cbc.CryptBlocks(ciphertext, ciphertext)
+ authtag := ctx.computeAuthTag(data, nonce, ciphertext)
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
+ copy(out, ciphertext)
+ copy(out[len(ciphertext):], authtag)
+
+ return ret
+}
+
+// Open decrypts and authenticates the ciphertext.
+func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(ciphertext) < ctx.authtagBytes {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)")
+ }
+
+ offset := len(ciphertext) - ctx.authtagBytes
+ expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
+ match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
+ if match != 1 {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)")
+ }
+
+ cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
+
+ // Make copy of ciphertext buffer, don't want to modify in place
+ buffer := append([]byte{}, []byte(ciphertext[:offset])...)
+
+ if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
+ return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)")
+ }
+
+ cbc.CryptBlocks(buffer, buffer)
+
+ // Remove padding
+ plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
+ if err != nil {
+ return nil, err
+ }
+
+ ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
+ copy(out, plaintext)
+
+ return ret, nil
+}
+
+// Compute an authentication tag
+func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
+ buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
+ n := 0
+ n += copy(buffer, aad)
+ n += copy(buffer[n:], nonce)
+ n += copy(buffer[n:], ciphertext)
+ binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
+
+ // According to documentation, Write() on hash.Hash never fails.
+ hmac := hmac.New(ctx.hash, ctx.integrityKey)
+ _, _ = hmac.Write(buffer)
+
+ return hmac.Sum(nil)[:ctx.authtagBytes]
+}
+
+// resize ensures that the given slice has a capacity of at least n bytes.
+// If the capacity of the slice is less than n, a new slice is allocated
+// and the existing data will be copied.
+func resize(in []byte, n uint64) (head, tail []byte) {
+ if uint64(cap(in)) >= n {
+ head = in[:n]
+ } else {
+ head = make([]byte, n)
+ copy(head, in)
+ }
+
+ tail = head[len(in):]
+ return
+}
+
+// Apply padding
+func padBuffer(buffer []byte, blockSize int) []byte {
+ missing := blockSize - (len(buffer) % blockSize)
+ ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
+ padding := bytes.Repeat([]byte{byte(missing)}, missing)
+ copy(out, padding)
+ return ret
+}
+
+// Remove padding
+func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
+ if len(buffer)%blockSize != 0 {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ last := buffer[len(buffer)-1]
+ count := int(last)
+
+ if count == 0 || count > blockSize || count > len(buffer) {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ padding := bytes.Repeat([]byte{last}, count)
+ if !bytes.HasSuffix(buffer, padding) {
+ return nil, errors.New("go-jose/go-jose: invalid padding")
+ }
+
+ return buffer[:len(buffer)-count], nil
+}
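
The AEAD above follows the AES_CBC_HMAC_SHA2 construction from RFC 7518: the tag is an HMAC over AAD || IV || ciphertext || AL, where AL is the AAD length in bits as a 64-bit big-endian integer, and the HMAC output is truncated to half its length. A standalone sketch of that tag computation for A128CBC-HS256 follows; the function name and inputs are placeholders.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// authTag computes HMAC-SHA256 over AAD || IV || ciphertext || AL and
// truncates the result, mirroring computeAuthTag above.
func authTag(macKey, aad, iv, ciphertext []byte) []byte {
	al := make([]byte, 8)
	binary.BigEndian.PutUint64(al, uint64(len(aad))*8)

	mac := hmac.New(sha256.New, macKey)
	mac.Write(aad)
	mac.Write(iv)
	mac.Write(ciphertext)
	mac.Write(al)

	full := mac.Sum(nil)
	return full[:len(full)/2] // A128CBC-HS256 keeps the first 16 bytes
}

func main() {
	tag := authTag(make([]byte, 16), []byte("aad"), make([]byte, 16), make([]byte, 32))
	fmt.Printf("%d-byte tag\n", len(tag))
}
```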
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go
new file mode 100644
index 00000000000..f62c3bdba5d
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go
@@ -0,0 +1,75 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto"
+ "encoding/binary"
+ "hash"
+ "io"
+)
+
+type concatKDF struct {
+ z, info []byte
+ i uint32
+ cache []byte
+ hasher hash.Hash
+}
+
+// NewConcatKDF builds a KDF reader based on the given inputs.
+func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
+ buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
+ n := 0
+ n += copy(buffer, algID)
+ n += copy(buffer[n:], ptyUInfo)
+ n += copy(buffer[n:], ptyVInfo)
+ n += copy(buffer[n:], supPubInfo)
+ copy(buffer[n:], supPrivInfo)
+
+ hasher := hash.New()
+
+ return &concatKDF{
+ z: z,
+ info: buffer,
+ hasher: hasher,
+ cache: []byte{},
+ i: 1,
+ }
+}
+
+func (ctx *concatKDF) Read(out []byte) (int, error) {
+ copied := copy(out, ctx.cache)
+ ctx.cache = ctx.cache[copied:]
+
+ for copied < len(out) {
+ ctx.hasher.Reset()
+
+ // Write on a hash.Hash never fails
+ _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
+ _, _ = ctx.hasher.Write(ctx.z)
+ _, _ = ctx.hasher.Write(ctx.info)
+
+ hash := ctx.hasher.Sum(nil)
+ chunkCopied := copy(out[copied:], hash)
+ copied += chunkCopied
+ ctx.cache = hash[chunkCopied:]
+
+ ctx.i++
+ }
+
+ return copied, nil
+}
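
A small sketch of deriving key material through the exported NewConcatKDF above; the inputs are placeholder values. In JWE the OtherInfo fields are built from the algorithm identifier and the apu/apv headers, length-prefixed as DeriveECDHES does.

```go
package main

import (
	"crypto"
	_ "crypto/sha256" // register SHA-256 so crypto.SHA256.New works
	"fmt"
	"io"

	josecipher "gopkg.in/go-jose/go-jose.v2/cipher"
)

func main() {
	z := make([]byte, 32) // shared secret from ECDH; all zeros here for illustration
	kdf := josecipher.NewConcatKDF(crypto.SHA256, z,
		[]byte("A256GCM"), nil, nil, []byte{0, 0, 1, 0}, nil)

	key := make([]byte, 32)
	if _, err := io.ReadFull(kdf, key); err != nil {
		panic(err)
	}
	fmt.Printf("derived %d bytes of key material\n", len(key))
}
```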
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go
new file mode 100644
index 00000000000..093c646740b
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go
@@ -0,0 +1,86 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "encoding/binary"
+)
+
+// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
+// It is an error to call this function with a private/public key that are not on the same
+// curve. Callers must ensure that the keys are valid before calling this function. Output
+// size may be at most 1<<16 bytes (64 KiB).
+func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
+ if size > 1<<16 {
+ panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
+ }
+
+ // algId, partyUInfo, partyVInfo inputs must be prefixed with the length
+ algID := lengthPrefixed([]byte(alg))
+ ptyUInfo := lengthPrefixed(apuData)
+ ptyVInfo := lengthPrefixed(apvData)
+
+ // suppPubInfo is the encoded length of the output size in bits
+ supPubInfo := make([]byte, 4)
+ binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
+
+ if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
+ panic("public key not on same curve as private key")
+ }
+
+ z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
+ zBytes := z.Bytes()
+
+ // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
+ // the returned byte array. This can lead to a problem where zBytes will be
+	// shorter than expected, which breaks the key derivation. Therefore we must pad
+ // to the full length of the expected coordinate here before calling the KDF.
+ octSize := dSize(priv.Curve)
+ if len(zBytes) != octSize {
+ zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
+ }
+
+ reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
+ key := make([]byte, size)
+
+ // Read on the KDF will never fail
+ _, _ = reader.Read(key)
+
+ return key
+}
+
+// dSize returns the size in octets for a coordinate on an elliptic curve.
+func dSize(curve elliptic.Curve) int {
+ order := curve.Params().P
+ bitLen := order.BitLen()
+ size := bitLen / 8
+ if bitLen%8 != 0 {
+ size++
+ }
+ return size
+}
+
+func lengthPrefixed(data []byte) []byte {
+ out := make([]byte, len(data)+4)
+ binary.BigEndian.PutUint32(out, uint32(len(data)))
+ copy(out[4:], data)
+ return out
+}
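
A sketch of direct ECDH-ES agreement using the DeriveECDHES helper above: both parties derive the same content key from their own private key and the peer's public key. The keys are generated here purely for illustration, and apu/apv are left empty as in this package's ecKeyGenerator.

```go
package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	josecipher "gopkg.in/go-jose/go-jose.v2/cipher"
)

func main() {
	alice, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	bob, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	// Derive a 32-byte key (e.g. for A256GCM) on each side.
	k1 := josecipher.DeriveECDHES("A256GCM", nil, nil, alice, &bob.PublicKey, 32)
	k2 := josecipher.DeriveECDHES("A256GCM", nil, nil, bob, &alice.PublicKey, 32)

	fmt.Println("keys match:", bytes.Equal(k1, k2))
}
```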
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go
new file mode 100644
index 00000000000..668358f981b
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go
@@ -0,0 +1,109 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package josecipher
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+)
+
+var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
+
+// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
+ if len(cek)%8 != 0 {
+ return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := len(cek) / 8
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], cek[i*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer, defaultIV)
+
+ for t := 0; t < 6*n; t++ {
+ copy(buffer[8:], r[t%n])
+
+ block.Encrypt(buffer, buffer)
+
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(r[t%n], buffer[8:])
+ }
+
+ out := make([]byte, (n+1)*8)
+ copy(out, buffer[:8])
+ for i := range r {
+ copy(out[(i+1)*8:], r[i])
+ }
+
+ return out, nil
+}
+
+// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
+func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
+ if len(ciphertext)%8 != 0 {
+ return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
+ }
+
+ n := (len(ciphertext) / 8) - 1
+ r := make([][]byte, n)
+
+ for i := range r {
+ r[i] = make([]byte, 8)
+ copy(r[i], ciphertext[(i+1)*8:])
+ }
+
+ buffer := make([]byte, 16)
+ tBytes := make([]byte, 8)
+ copy(buffer[:8], ciphertext[:8])
+
+ for t := 6*n - 1; t >= 0; t-- {
+ binary.BigEndian.PutUint64(tBytes, uint64(t+1))
+
+ for i := 0; i < 8; i++ {
+ buffer[i] = buffer[i] ^ tBytes[i]
+ }
+ copy(buffer[8:], r[t%n])
+
+ block.Decrypt(buffer, buffer)
+
+ copy(r[t%n], buffer[8:])
+ }
+
+ if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
+ return nil, errors.New("go-jose/go-jose: failed to unwrap key")
+ }
+
+ out := make([]byte, n*8)
+ for i := range r {
+ copy(out[i*8:], r[i])
+ }
+
+ return out, nil
+}
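
A sketch of a wrap/unwrap round trip with the KeyWrap and KeyUnwrap functions above (RFC 3394 key wrapping); the zero-valued KEK and CEK are placeholders.

```go
package main

import (
	"bytes"
	"crypto/aes"
	"fmt"

	josecipher "gopkg.in/go-jose/go-jose.v2/cipher"
)

func main() {
	kek := make([]byte, 16) // 128-bit key-encryption key
	cek := make([]byte, 32) // 256-bit content-encryption key to protect

	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}

	wrapped, err := josecipher.KeyWrap(block, cek)
	if err != nil {
		panic(err)
	}
	unwrapped, err := josecipher.KeyUnwrap(block, wrapped)
	if err != nil {
		panic(err)
	}

	fmt.Println("wrapped length:", len(wrapped)) // input length + 8 bytes
	fmt.Println("round trip ok:", bytes.Equal(cek, unwrapped))
}
```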
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go
new file mode 100644
index 00000000000..73aab0fabb1
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go
@@ -0,0 +1,542 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "gopkg.in/go-jose/go-jose.v2/json"
+)
+
+// Encrypter represents an encrypter which produces an encrypted JWE object.
+type Encrypter interface {
+ Encrypt(plaintext []byte) (*JSONWebEncryption, error)
+ EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
+ Options() EncrypterOptions
+}
+
+// A generic content cipher
+type contentCipher interface {
+ keySize() int
+ encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
+ decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
+}
+
+// A key generator (for generating/getting a CEK)
+type keyGenerator interface {
+ keySize() int
+ genKey() ([]byte, rawHeader, error)
+}
+
+// A generic key encrypter
+type keyEncrypter interface {
+ encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
+}
+
+// A generic key decrypter
+type keyDecrypter interface {
+ decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
+}
+
+// A generic encrypter based on the given key encrypter and content cipher.
+type genericEncrypter struct {
+ contentAlg ContentEncryption
+ compressionAlg CompressionAlgorithm
+ cipher contentCipher
+ recipients []recipientKeyInfo
+ keyGenerator keyGenerator
+ extraHeaders map[HeaderKey]interface{}
+}
+
+type recipientKeyInfo struct {
+ keyID string
+ keyAlg KeyAlgorithm
+ keyEncrypter keyEncrypter
+}
+
+// EncrypterOptions represents options that can be set on new encrypters.
+type EncrypterOptions struct {
+ Compression CompressionAlgorithm
+
+ // Optional map of additional keys to be inserted into the protected header
+	// of a JWE object. Some specifications which make use of JWE like to insert
+ // additional values here. All values must be JSON-serializable.
+ ExtraHeaders map[HeaderKey]interface{}
+}
+
+// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
+// if necessary. It returns itself and so can be used in a fluent style.
+func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
+ if eo.ExtraHeaders == nil {
+ eo.ExtraHeaders = map[HeaderKey]interface{}{}
+ }
+ eo.ExtraHeaders[k] = v
+ return eo
+}
+
+// WithContentType adds a content type ("cty") header and returns the updated
+// EncrypterOptions.
+func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderContentType, contentType)
+}
+
+// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
+func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
+ return eo.WithHeader(HeaderType, typ)
+}
+
+// Recipient represents an algorithm/key to encrypt messages to.
+//
+// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
+// on the password-based encryption algorithms PBES2-HS256+A128KW,
+// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe
+// default of 100000 will be used for the count and a 128-bit random salt will
+// be generated.
+type Recipient struct {
+ Algorithm KeyAlgorithm
+ Key interface{}
+ KeyID string
+ PBES2Count int
+ PBES2Salt []byte
+}
+
+// NewEncrypter creates an appropriate encrypter based on the key type
+func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: getContentCipher(enc),
+ }
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ if encrypter.cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+
+ var keyID string
+ var rawKey interface{}
+ switch encryptionKey := rcpt.Key.(type) {
+ case JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case *JSONWebKey:
+ keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
+ case OpaqueKeyEncrypter:
+ keyID, rawKey = encryptionKey.KeyID(), encryptionKey
+ default:
+ rawKey = encryptionKey
+ }
+
+ switch rcpt.Algorithm {
+ case DIRECT:
+ // Direct encryption mode must be treated differently
+ if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ if encrypter.cipher.keySize() != len(rawKey.([]byte)) {
+ return nil, ErrInvalidKeySize
+ }
+ encrypter.keyGenerator = staticKeyGenerator{
+ key: rawKey.([]byte),
+ }
+ recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ case ECDH_ES:
+ // ECDH-ES (w/o key wrapping) is similar to DIRECT mode
+ typeOf := reflect.TypeOf(rawKey)
+ if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
+ return nil, ErrUnsupportedKeyType
+ }
+ encrypter.keyGenerator = ecKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ algID: string(enc),
+ publicKey: rawKey.(*ecdsa.PublicKey),
+ }
+ recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
+ recipientInfo.keyID = keyID
+ if rcpt.KeyID != "" {
+ recipientInfo.keyID = rcpt.KeyID
+ }
+ encrypter.recipients = []recipientKeyInfo{recipientInfo}
+ return encrypter, nil
+ default:
+ // Can just add a standard recipient
+ encrypter.keyGenerator = randomKeyGenerator{
+ size: encrypter.cipher.keySize(),
+ }
+ err := encrypter.addRecipient(rcpt)
+ return encrypter, err
+ }
+}
+
+// NewMultiEncrypter creates a multi-encrypter based on the given parameters
+func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
+ cipher := getContentCipher(enc)
+
+ if cipher == nil {
+ return nil, ErrUnsupportedAlgorithm
+ }
+ if rcpts == nil || len(rcpts) == 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty")
+ }
+
+ encrypter := &genericEncrypter{
+ contentAlg: enc,
+ recipients: []recipientKeyInfo{},
+ cipher: cipher,
+ keyGenerator: randomKeyGenerator{
+ size: cipher.keySize(),
+ },
+ }
+
+ if opts != nil {
+ encrypter.compressionAlg = opts.Compression
+ encrypter.extraHeaders = opts.ExtraHeaders
+ }
+
+ for _, recipient := range rcpts {
+ err := encrypter.addRecipient(recipient)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return encrypter, nil
+}
+
+func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
+ var recipientInfo recipientKeyInfo
+
+ switch recipient.Algorithm {
+ case DIRECT, ECDH_ES:
+ return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
+ }
+
+ recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
+ if recipient.KeyID != "" {
+ recipientInfo.keyID = recipient.KeyID
+ }
+
+ switch recipient.Algorithm {
+ case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
+ if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok {
+ sr.p2c = recipient.PBES2Count
+ sr.p2s = recipient.PBES2Salt
+ }
+ }
+
+ if err == nil {
+ ctx.recipients = append(ctx.recipients, recipientInfo)
+ }
+ return err
+}
+
+func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
+ switch encryptionKey := encryptionKey.(type) {
+ case *rsa.PublicKey:
+ return newRSARecipient(alg, encryptionKey)
+ case *ecdsa.PublicKey:
+ return newECDHRecipient(alg, encryptionKey)
+ case []byte:
+ return newSymmetricRecipient(alg, encryptionKey)
+ case string:
+ return newSymmetricRecipient(alg, []byte(encryptionKey))
+ case *JSONWebKey:
+ recipient, err := makeJWERecipient(alg, encryptionKey.Key)
+ recipient.keyID = encryptionKey.KeyID
+ return recipient, err
+ }
+ if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok {
+ return newOpaqueKeyEncrypter(alg, encrypter)
+ }
+ return recipientKeyInfo{}, ErrUnsupportedKeyType
+}
+
+// newDecrypter creates an appropriate decrypter based on the key type
+func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
+ switch decryptionKey := decryptionKey.(type) {
+ case *rsa.PrivateKey:
+ return &rsaDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case *ecdsa.PrivateKey:
+ return &ecDecrypterSigner{
+ privateKey: decryptionKey,
+ }, nil
+ case []byte:
+ return &symmetricKeyCipher{
+ key: decryptionKey,
+ }, nil
+ case string:
+ return &symmetricKeyCipher{
+ key: []byte(decryptionKey),
+ }, nil
+ case JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ case *JSONWebKey:
+ return newDecrypter(decryptionKey.Key)
+ }
+ if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok {
+ return &opaqueKeyDecrypter{decrypter: okd}, nil
+ }
+ return nil, ErrUnsupportedKeyType
+}
+
+// Implementation of encrypt method producing a JWE object.
+func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
+ return ctx.EncryptWithAuthData(plaintext, nil)
+}
+
+// Implementation of encrypt method producing a JWE object, with additional authenticated data.
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
+ obj := &JSONWebEncryption{}
+ obj.aad = aad
+
+ obj.protected = &rawHeader{}
+ err := obj.protected.set(headerEncryption, ctx.contentAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.recipients = make([]recipientInfo, len(ctx.recipients))
+
+ if len(ctx.recipients) == 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to")
+ }
+
+ cek, headers, err := ctx.keyGenerator.genKey()
+ if err != nil {
+ return nil, err
+ }
+
+ obj.protected.merge(&headers)
+
+ for i, info := range ctx.recipients {
+ recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ err = recipient.header.set(headerAlgorithm, info.keyAlg)
+ if err != nil {
+ return nil, err
+ }
+
+ if info.keyID != "" {
+ err = recipient.header.set(headerKeyID, info.keyID)
+ if err != nil {
+ return nil, err
+ }
+ }
+ obj.recipients[i] = recipient
+ }
+
+ if len(ctx.recipients) == 1 {
+ // Move per-recipient headers into main protected header if there's
+ // only a single recipient.
+ obj.protected.merge(obj.recipients[0].header)
+ obj.recipients[0].header = nil
+ }
+
+ if ctx.compressionAlg != NONE {
+ plaintext, err = compress(ctx.compressionAlg, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ err = obj.protected.set(headerCompression, ctx.compressionAlg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for k, v := range ctx.extraHeaders {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ (*obj.protected)[k] = makeRawMessage(b)
+ }
+
+ authData := obj.computeAuthData()
+ parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
+ if err != nil {
+ return nil, err
+ }
+
+ obj.iv = parts.iv
+ obj.ciphertext = parts.ciphertext
+ obj.tag = parts.tag
+
+ return obj, nil
+}
+
+func (ctx *genericEncrypter) Options() EncrypterOptions {
+ return EncrypterOptions{
+ Compression: ctx.compressionAlg,
+ ExtraHeaders: ctx.extraHeaders,
+ }
+}
+
+// Decrypt and validate the object and return the plaintext. Note that this
+// function does not support multi-recipient; if you desire multi-recipient
+// decryption, use DecryptMulti instead.
+func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
+ headers := obj.mergedHeaders(nil)
+
+ if len(obj.recipients) > 1 {
+ return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
+ }
+
+ critical, err := headers.getCritical()
+ if err != nil {
+ return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ }
+
+ decrypter, err := newDecrypter(decryptionKey)
+ if err != nil {
+ return nil, err
+ }
+
+ cipher := getContentCipher(headers.getEncryption())
+ if cipher == nil {
+ return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ var plaintext []byte
+ recipient := obj.recipients[0]
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ }
+
+ if plaintext == nil {
+ return nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ }
+
+ return plaintext, err
+}
+
+// DecryptMulti decrypts and validates the object and returns the plaintexts,
+// with support for multiple recipients. It returns the index of the recipient
+// for which the decryption was successful, the merged headers for that recipient,
+// and the plaintext.
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
+ globalHeaders := obj.mergedHeaders(nil)
+
+ critical, err := globalHeaders.getCritical()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
+ }
+
+ if len(critical) > 0 {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
+ }
+
+ decrypter, err := newDecrypter(decryptionKey)
+ if err != nil {
+ return -1, Header{}, nil, err
+ }
+
+ encryption := globalHeaders.getEncryption()
+ cipher := getContentCipher(encryption)
+ if cipher == nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption))
+ }
+
+ generator := randomKeyGenerator{
+ size: cipher.keySize(),
+ }
+
+ parts := &aeadParts{
+ iv: obj.iv,
+ ciphertext: obj.ciphertext,
+ tag: obj.tag,
+ }
+
+ authData := obj.computeAuthData()
+
+ index := -1
+ var plaintext []byte
+ var headers rawHeader
+
+ for i, recipient := range obj.recipients {
+ recipientHeaders := obj.mergedHeaders(&recipient)
+
+ cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
+ if err == nil {
+ // Found a valid CEK -- let's try to decrypt.
+ plaintext, err = cipher.decrypt(cek, authData, parts)
+ if err == nil {
+ index = i
+ headers = recipientHeaders
+ break
+ }
+ }
+ }
+
+ if plaintext == nil || err != nil {
+ return -1, Header{}, nil, ErrCryptoFailure
+ }
+
+ // The "zip" header parameter may only be present in the protected header.
+ if comp := obj.protected.getCompression(); comp != "" {
+ plaintext, err = decompress(comp, plaintext)
+ }
+
+ sanitized, err := headers.sanitized()
+ if err != nil {
+ return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err)
+ }
+
+ return index, sanitized, plaintext, err
+}
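
For reference, a minimal usage sketch (not part of the vendored crypter.go above) of the single-recipient NewEncrypter / Decrypt path shown in this file. The 16-byte key and the choice of A128GCM with DIRECT key management are illustrative assumptions; the import path is the vendored gopkg.in one.

package main

import (
	"fmt"

	jose "gopkg.in/go-jose/go-jose.v2"
)

func main() {
	// 16 bytes: matches the A128GCM key-size check in newEncrypter's DIRECT branch.
	key := []byte("0123456789abcdef")

	enc, err := jose.NewEncrypter(jose.A128GCM, jose.Recipient{Algorithm: jose.DIRECT, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("hello"))
	if err != nil {
		panic(err)
	}

	// Compact form; obj.FullSerialize() would give the JSON serialization instead.
	serialized, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseEncrypted(serialized)
	if err != nil {
		panic(err)
	}

	// Single-recipient Decrypt as implemented above; DecryptMulti handles multi-recipient objects.
	plaintext, err := parsed.Decrypt(key)
	fmt.Println(string(plaintext), err)
}
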
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/doc.go b/vendor/gopkg.in/go-jose/go-jose.v2/doc.go
new file mode 100644
index 00000000000..dd1387f3f06
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/doc.go
@@ -0,0 +1,27 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+
+Package jose aims to provide an implementation of the Javascript Object Signing
+and Encryption set of standards. It implements encryption and signing based on
+the JSON Web Encryption and JSON Web Signature standards, with optional JSON
+Web Token support available in a sub-package. The library supports both the
+compact and full serialization formats, and has optional support for multiple
+recipients.
+
+*/
+package jose
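
For reference, a minimal sketch (not part of the vendored doc.go above) of the compact and full serialization formats the package comment mentions, using the v2 signing API; the HMAC key and payload are illustrative assumptions.

package main

import (
	"fmt"

	jose "gopkg.in/go-jose/go-jose.v2"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // illustrative 32-byte HMAC key

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte(`{"hello":"world"}`))
	if err != nil {
		panic(err)
	}

	compact, _ := obj.CompactSerialize() // header.payload.signature
	full := obj.FullSerialize()          // JSON serialization with "payload" and signature members

	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(key)
	fmt.Printf("compact: %s\nfull: %s\npayload: %s err: %v\n", compact, full, payload, err)
}
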
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go
new file mode 100644
index 00000000000..40b688b3d2d
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go
@@ -0,0 +1,185 @@
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/base64"
+ "encoding/binary"
+ "io"
+ "math/big"
+ "strings"
+ "unicode"
+
+ "gopkg.in/go-jose/go-jose.v2/json"
+)
+
+// Helper function to serialize known-good objects.
+// Precondition: value is not a nil pointer.
+func mustSerializeJSON(value interface{}) []byte {
+ out, err := json.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+ // We never want to serialize the top-level value "null," since it's not a
+ // valid JOSE message. But if a caller passes in a nil pointer to this method,
+ // MarshalJSON will happily serialize it as the top-level value "null". If
+ // that value is then embedded in another operation, for instance by being
+ // base64-encoded and fed as input to a signing algorithm
+ // (https://github.com/go-jose/go-jose/issues/22), the result will be
+ // incorrect. Because this method is intended for known-good objects, and a nil
+ // pointer is not a known-good object, we are free to panic in this case.
+ // Note: It's not possible to directly check whether the data pointed at by an
+ // interface is a nil pointer, so we do this hacky workaround.
+ // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
+ if string(out) == "null" {
+ panic("Tried to serialize a nil pointer.")
+ }
+ return out
+}
+
+// Strip all newlines and whitespace
+func stripWhitespace(data string) string {
+ buf := strings.Builder{}
+ buf.Grow(len(data))
+ for _, r := range data {
+ if !unicode.IsSpace(r) {
+ buf.WriteRune(r)
+ }
+ }
+ return buf.String()
+}
+
+// Perform compression based on algorithm
+func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return deflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Perform decompression based on algorithm
+func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
+ switch algorithm {
+ case DEFLATE:
+ return inflate(input)
+ default:
+ return nil, ErrUnsupportedAlgorithm
+ }
+}
+
+// Compress with DEFLATE
+func deflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+
+ // Writing to byte buffer, err is always nil
+ writer, _ := flate.NewWriter(output, 1)
+ _, _ = io.Copy(writer, bytes.NewBuffer(input))
+
+ err := writer.Close()
+ return output.Bytes(), err
+}
+
+// Decompress with DEFLATE
+func inflate(input []byte) ([]byte, error) {
+ output := new(bytes.Buffer)
+ reader := flate.NewReader(bytes.NewBuffer(input))
+
+ _, err := io.Copy(output, reader)
+ if err != nil {
+ return nil, err
+ }
+
+ err = reader.Close()
+ return output.Bytes(), err
+}
+
+// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
+type byteBuffer struct {
+ data []byte
+}
+
+func newBuffer(data []byte) *byteBuffer {
+ if data == nil {
+ return nil
+ }
+ return &byteBuffer{
+ data: data,
+ }
+}
+
+func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
+ if len(data) > length {
+ panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
+ }
+ pad := make([]byte, length-len(data))
+ return newBuffer(append(pad, data...))
+}
+
+func newBufferFromInt(num uint64) *byteBuffer {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, num)
+ return newBuffer(bytes.TrimLeft(data, "\x00"))
+}
+
+func (b *byteBuffer) MarshalJSON() ([]byte, error) {
+ return json.Marshal(b.base64())
+}
+
+func (b *byteBuffer) UnmarshalJSON(data []byte) error {
+ var encoded string
+ err := json.Unmarshal(data, &encoded)
+ if err != nil {
+ return err
+ }
+
+ if encoded == "" {
+ return nil
+ }
+
+ decoded, err := base64.RawURLEncoding.DecodeString(encoded)
+ if err != nil {
+ return err
+ }
+
+ *b = *newBuffer(decoded)
+
+ return nil
+}
+
+func (b *byteBuffer) base64() string {
+ return base64.RawURLEncoding.EncodeToString(b.data)
+}
+
+func (b *byteBuffer) bytes() []byte {
+ // Handling nil here allows us to transparently handle nil slices when serializing.
+ if b == nil {
+ return nil
+ }
+ return b.data
+}
+
+func (b byteBuffer) bigInt() *big.Int {
+ return new(big.Int).SetBytes(b.data)
+}
+
+func (b byteBuffer) toInt() int {
+ return int(b.bigInt().Int64())
+}
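
For reference, a small standalone sketch (not part of the vendored encoding.go above) of the two encodings this file relies on: unpadded URL-safe base64 as used by byteBuffer.base64, and the stripped big-endian integer form produced by newBufferFromInt. The input values are illustrative.

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

func main() {
	// byteBuffer.base64(): unpadded, URL-safe alphabet.
	fmt.Println(base64.RawURLEncoding.EncodeToString([]byte{0xfa, 0x01})) // "-gE"

	// newBufferFromInt(65537): 8-byte big-endian with leading zero bytes stripped.
	data := make([]byte, 8)
	binary.BigEndian.PutUint64(data, 65537)
	fmt.Printf("% x\n", bytes.TrimLeft(data, "\x00")) // "01 00 01"
}
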
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE b/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE
new file mode 100644
index 00000000000..74487567632
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md
new file mode 100644
index 00000000000..86de5e5581f
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+ [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+ This is to avoid differences in the interpretation of JOSE messages between
+ go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+ input whenever we detect a duplicate. Rather than trying to work with malformed
+ data, we prefer to reject it right away.
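
For reference, a small sketch (not part of the vendored README) of the two behavioral differences described above, using the fork's import path; the struct, field tag, and inputs are illustrative assumptions.

package main

import (
	"fmt"

	safejson "gopkg.in/go-jose/go-jose.v2/json"
)

type header struct {
	Alg string `json:"alg"`
}

func main() {
	var h header

	// Case-sensitive matching: "ALG" does not populate the "alg" field.
	_ = safejson.Unmarshal([]byte(`{"ALG":"RS256"}`), &h)
	fmt.Printf("%q\n", h.Alg) // ""

	// Duplicate keys are rejected instead of "last one wins".
	err := safejson.Unmarshal([]byte(`{"alg":"RS256","alg":"none"}`), &h)
	fmt.Println(err) // json: duplicate key 'alg' in object
}
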
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go b/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go
new file mode 100644
index 00000000000..4dbc4146cf9
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go
@@ -0,0 +1,1217 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// requiring an exact, case-sensitive match (this fork does not fall back to a case-insensitive match).
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
+
+type NumberUnmarshalType int
+
+const (
+ // unmarshal a JSON number into an interface{} as a float64
+ UnmarshalFloat NumberUnmarshalType = iota
+ // unmarshal a JSON number into an interface{} as a `json.Number`
+ UnmarshalJSONNumber
+	// unmarshal a JSON number into an interface{} as an int64
+	// if the value is an integer, otherwise as a float64
+ UnmarshalIntOrFloat
+)
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ numberType NumberUnmarshalType
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ d.object(reflect.Value{})
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, []byte(key)) {
+ f = ff
+ break
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64, int64 or a Number
+// depending on d.numberType.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ switch d.numberType {
+
+ case UnmarshalJSONNumber:
+ return Number(s), nil
+ case UnmarshalIntOrFloat:
+ v, err := strconv.ParseInt(s, 10, 64)
+ if err == nil {
+ return v, nil
+ }
+
+ // tries to parse integer number in scientific notation
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+
+ // if it has no decimal value use int64
+ if fi, fd := math.Modf(f); fd == 0.0 {
+ return int64(fi), nil
+ }
+ return f, nil
+ default:
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+ }
+
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ //Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ keys := map[string]bool{}
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Check for duplicate keys.
+ _, ok = keys[key]
+ if !ok {
+ keys[key] = true
+ } else {
+ d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
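
For reference, a standalone re-expression (not part of the vendored decode.go above) of the UnmarshalIntOrFloat branch of convertNumber: integers, including integral values written in scientific notation, come back as int64, everything else as float64. The helper name intOrFloat is illustrative.

package main

import (
	"fmt"
	"math"
	"strconv"
)

func intOrFloat(s string) (interface{}, error) {
	if v, err := strconv.ParseInt(s, 10, 64); err == nil {
		return v, nil
	}
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return nil, err
	}
	// No fractional part: report as int64, mirroring the math.Modf check above.
	if fi, frac := math.Modf(f); frac == 0.0 {
		return int64(fi), nil
	}
	return f, nil
}

func main() {
	for _, s := range []string{"42", "1e3", "3.14"} {
		v, _ := intOrFloat(s)
		fmt.Printf("%s -> %T(%v)\n", s, v, v)
	}
}
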
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go b/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go
new file mode 100644
index 00000000000..1dae8bb7cd8
--- /dev/null
+++ b/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go
@@ -0,0 +1,1197 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML